##// END OF EJS Templates
extensions: add detailed loading information...
Martijn Pieters -
r38834:d5895867 default
parent child Browse files
Show More
@@ -1,766 +1,808
1 1 # extensions.py - extension handling for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import ast
11 11 import collections
12 12 import functools
13 13 import imp
14 14 import inspect
15 15 import os
16 16
17 17 from .i18n import (
18 18 _,
19 19 gettext,
20 20 )
21 21
22 22 from . import (
23 23 cmdutil,
24 24 configitems,
25 25 error,
26 26 pycompat,
27 27 util,
28 28 )
29 29
30 30 from .utils import (
31 31 stringutil,
32 32 )
33 33
34 34 _extensions = {}
35 35 _disabledextensions = {}
36 36 _aftercallbacks = {}
37 37 _order = []
38 38 _builtin = {
39 39 'hbisect',
40 40 'bookmarks',
41 41 'color',
42 42 'parentrevspec',
43 43 'progress',
44 44 'interhg',
45 45 'inotify',
46 46 'hgcia'
47 47 }
48 48
def extensions(ui=None):
    '''Iterate over loaded extensions as (name, module) pairs, in load order.

    When a ui is supplied, extensions disabled in its config (a value
    starting with "!") are skipped; without a ui, every successfully
    loaded extension is yielded.
    '''
    if ui:
        def enabled(name):
            # an extension counts as enabled under either its bare name
            # or the "hgext."-prefixed form
            for fmt in ['%s', 'hgext.%s']:
                conf = ui.config('extensions', fmt % name)
                if conf is not None and not conf.startswith('!'):
                    return True
    else:
        enabled = lambda name: True
    for extname in _order:
        extmod = _extensions[extname]
        # entries left as None are extensions that failed to load
        if extmod and enabled(extname):
            yield extname, extmod
62 62
def find(name):
    '''return module with given extension name'''
    if name in _extensions:
        mod = _extensions[name]
    else:
        # fall back to matching a dotted/path-qualified registration name
        # by suffix, e.g. "foo.bar" or "foo/bar" for name "bar"
        mod = None
        for fullname, module in _extensions.iteritems():
            if fullname.endswith('.' + name) or fullname.endswith('/' + name):
                mod = module
                break
    if not mod:
        # unknown name, or the extension failed to load (entry is None)
        raise KeyError(name)
    return mod
76 76
def loadpath(path, module_name):
    """Load the extension at ``path``, registering it as ``module_name``.

    Handles both a package directory (module/__init__.py) and a single
    .py file.  Returns the imported module object; IOError from a missing
    source file is re-raised with ``filename`` filled in.
    """
    # dots in the requested name would be taken as package separators
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    # imp requires native (str) strings on Python 3
    module_name = pycompat.fsdecode(module_name)
    path = pycompat.fsdecode(path)
    if os.path.isdir(path):
        # module/__init__.py style
        d, f = os.path.split(path)
        fd, fpath, desc = imp.find_module(f, [d])
        return imp.load_module(module_name, fd, fpath, desc)
    else:
        try:
            return imp.load_source(module_name, path)
        except IOError as exc:
            if not exc.filename:
                exc.filename = path # python does not fill this
            raise
94 94
def _importh(name):
    """import and return the <name> module"""
    # __import__ returns the top-level package; descend attribute by
    # attribute to reach the named leaf module
    top = __import__(pycompat.sysstr(name))
    return functools.reduce(getattr, name.split('.')[1:], top)
102 102
def _importext(name, path=None, reportfunc=None):
    """Import extension ``name``, returning its module.

    With ``path``, the module is loaded from that file.  Otherwise the
    lookup order is hgext.<name>, then hgext3rd.<name>, then plain <name>;
    ``reportfunc(err, failed, next)`` is called before each fallback.
    The final ImportError propagates to the caller.
    """
    if path:
        # the module will be loaded in sys.modules
        # choose an unique name so that it doesn't
        # conflicts with other modules
        mod = loadpath(path, 'hgext.%s' % name)
    else:
        try:
            mod = _importh("hgext.%s" % name)
        except ImportError as err:
            if reportfunc:
                reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
            try:
                mod = _importh("hgext3rd.%s" % name)
            except ImportError as err:
                if reportfunc:
                    reportfunc(err, "hgext3rd.%s" % name, name)
                # last resort: top-level module from sys.path
                mod = _importh(name)
    return mod
122 122
def _reportimporterror(ui, err, failed, next):
    """Debug-log that importing ``failed`` raised ``err``; ``next`` is tried."""
    # note: this ui.debug happens before --debug is processed,
    # Use --config ui.debug=1 to see them.
    if ui.configbool('devel', 'debug.extensions'):
        ui.debug('debug.extensions: - could not import %s (%s): trying %s\n'
                 % (failed, stringutil.forcebytestr(err), next))
        if ui.debugflag:
            ui.traceback()
131 131
def _rejectunicode(name, xs):
    """Recursively reject unicode strings inside extension tables.

    Raises ProgrammingError naming the offending table entry; extension
    tables must contain byte strings only.
    """
    if isinstance(xs, type(u'')):
        raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
                                     hint="use b'' to make it byte string")
    if isinstance(xs, dict):
        for key, value in xs.items():
            _rejectunicode(name, key)
            # qualify nested values with the key for a useful error message
            _rejectunicode(b'%s.%s' % (name, stringutil.forcebytestr(key)),
                           value)
    elif isinstance(xs, (list, set, tuple)):
        for item in xs:
            _rejectunicode(name, item)
143 143
# attributes set by registrar.command
_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')

def _validatecmdtable(ui, cmdtable):
    """Check if extension commands have required attributes"""
    for cmd, entry in cmdtable.iteritems():
        func = entry[0]
        # commands registered via @command always carry these attributes
        missing = [attr for attr in _cmdfuncattrs
                   if not util.safehasattr(func, attr)]
        if missing:
            raise error.ProgrammingError(
                'missing attributes: %s' % ', '.join(missing),
                hint="use @command decorator to register '%s'" % cmd)
157 157
def _validatetables(ui, mod):
    """Sanity check for loadable tables provided by extension module"""
    # plain dict tables are checked directly
    for tname in ['cmdtable', 'colortable', 'configtable']:
        _rejectunicode(tname, getattr(mod, tname, {}))
    # registrar objects expose their entries via a _table attribute
    registrars = ['filesetpredicate', 'internalmerge', 'revsetpredicate',
                  'templatefilter', 'templatefunc', 'templatekeyword']
    for tname in registrars:
        registrar = getattr(mod, tname, None)
        if registrar:
            _rejectunicode(tname, registrar._table)
    _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
168 168
def load(ui, name, path, log=lambda *a: None):
    """Load and register a single extension, returning its module.

    Returns None for built-in extension names, the cached module if
    already loaded, or None (after a warning) when the extension declares
    a ``minimumhgversion`` newer than this Mercurial.  ``log`` receives
    detailed progress messages (no-op by default).
    """
    if name.startswith('hgext.') or name.startswith('hgext/'):
        shortname = name[6:]
    else:
        shortname = name
    if shortname in _builtin:
        # obsolete extensions folded into core; silently ignore
        return None
    if shortname in _extensions:
        return _extensions[shortname]
    log(' - loading extension: %r\n', shortname)
    # mark as "being loaded" so a failure leaves a None placeholder
    _extensions[shortname] = None
    with util.timedcm() as stats:
        mod = _importext(name, path, bind(_reportimporterror, ui))
    log(' > %r extension loaded in %s\n', shortname, stats)

    # Before we do anything with the extension, check against minimum stated
    # compatibility. This gives extension authors a mechanism to have their
    # extensions short circuit when loaded with a known incompatible version
    # of Mercurial.
    minver = getattr(mod, 'minimumhgversion', None)
    if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
        ui.warn(_('(third party extension %s requires version %s or newer '
                  'of Mercurial; disabling)\n') % (shortname, minver))
        return
    log(' - validating extension tables: %r\n', shortname)
    _validatetables(ui, mod)

    _extensions[shortname] = mod
    _order.append(shortname)
    log(' - invoking registered callbacks: %r\n', shortname)
    with util.timedcm() as stats:
        for fn in _aftercallbacks.get(shortname, []):
            fn(loaded=True)
    log(' > callbacks completed in %s\n', stats)
    return mod
197 204
def _runuisetup(name, ui):
    """Invoke the uisetup() hook of extension ``name``, if any.

    Returns False (after warning the user) when uisetup raised; True
    otherwise, including when no uisetup hook exists.
    """
    uisetup = getattr(_extensions[name], 'uisetup', None)
    if not uisetup:
        return True
    try:
        uisetup(ui)
    except Exception as inst:
        ui.traceback(force=True)
        msg = stringutil.forcebytestr(inst)
        ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
        return False
    return True
209 216
def _runextsetup(name, ui):
    """Invoke the extsetup() hook of extension ``name``, if any.

    Returns False (after warning the user) when extsetup raised; True
    otherwise.  Supports the legacy extsetup() signature that takes no
    ui argument.
    """
    extsetup = getattr(_extensions[name], 'extsetup', None)
    if extsetup:
        try:
            try:
                extsetup(ui)
            except TypeError:
                # only retry without arguments when extsetup itself takes
                # none (old API); a TypeError raised from *inside* a
                # one-argument extsetup must propagate
                if pycompat.getargspec(extsetup).args:
                    raise
                extsetup() # old extsetup with no ui argument
        except Exception as inst:
            ui.traceback(force=True)
            msg = stringutil.forcebytestr(inst)
            ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
            return False
    return True
226 233
def loadall(ui, whitelist=None):
    """Load every extension enabled in ``ui``'s config and run their setup.

    With ``whitelist``, only the named extensions are considered.  May be
    called more than once; only extensions newly appended to _order are
    set up on each call.  Detailed progress goes to the debug log when
    devel.debug.extensions is set.
    """
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        # cheap no-op so call sites need no conditionals
        log = lambda *a, **kw: None
    result = ui.configitems("extensions")
    if whitelist is not None:
        result = [(k, v) for (k, v) in result if k in whitelist]
    # extensions already in _order were loaded by an earlier call; only
    # entries from newindex on get their setup hooks run below
    newindex = len(_order)
    log('loading %sextensions\n', 'additional ' if newindex else '')
    log('- processing %d entries\n', len(result))
    with util.timedcm() as stats:
        for (name, path) in result:
            if path:
                if path[0:1] == '!':
                    # "name = !" disables the extension
                    if name not in _disabledextensions:
                        log(' - skipping disabled extension: %r\n', name)
                    _disabledextensions[name] = path[1:]
                    continue
            try:
                load(ui, name, path, log)
            except Exception as inst:
                # a broken extension must not prevent startup; warn and go on
                msg = stringutil.forcebytestr(inst)
                if path:
                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
                            % (name, path, msg))
                else:
                    ui.warn(_("*** failed to import extension %s: %s\n")
                            % (name, msg))
                if isinstance(inst, error.Hint) and inst.hint:
                    ui.warn(_("*** (%s)\n") % inst.hint)
                ui.traceback()

    log('> loaded %d extensions, total time %s\n',
        len(_order) - newindex, stats)
    # list of (objname, loadermod, loadername) tuple:
    # - objname is the name of an object in extension module,
    #   from which extra information is loaded
    # - loadermod is the module where loader is placed
    # - loadername is the name of the function,
    #   which takes (ui, extensionname, extraobj) arguments
    #
    # This one is for the list of item that must be run before running any setup
    earlyextraloaders = [
        ('configtable', configitems, 'loadconfigtable'),
    ]

    log('- loading configtable attributes\n')
    _loadextra(ui, newindex, earlyextraloaders)

    broken = set()
    log('- executing uisetup hooks\n')
    for name in _order[newindex:]:
        log(' - running uisetup for %r\n', name)
        with util.timedcm() as stats:
            if not _runuisetup(name, ui):
                log(' - the %r extension uisetup failed\n', name)
                broken.add(name)
        log(' > uisetup for %r took %s\n', name, stats)

    log('- executing extsetup hooks\n')
    for name in _order[newindex:]:
        if name in broken:
            # skip extensions whose uisetup already failed
            continue
        log(' - running extsetup for %r\n', name)
        with util.timedcm() as stats:
            if not _runextsetup(name, ui):
                log(' - the %r extension extsetup failed\n', name)
                broken.add(name)
        log(' > extsetup for %r took %s\n', name, stats)

    for name in broken:
        log(' - disabling broken %r extension\n', name)
        _extensions[name] = None

    # Call aftercallbacks that were never met.
    log('- executing remaining aftercallbacks\n')
    with util.timedcm() as stats:
        for shortname in _aftercallbacks:
            if shortname in _extensions:
                continue

            for fn in _aftercallbacks[shortname]:
                log(' - extension %r not loaded, notify callbacks\n',
                    shortname)
                fn(loaded=False)
    log('> remaining aftercallbacks completed in %s\n', stats)

    # loadall() is called multiple times and lingering _aftercallbacks
    # entries could result in double execution. See issue4646.
    _aftercallbacks.clear()

    # delay importing avoids cyclic dependency (especially commands)
    from . import (
        color,
        commands,
        filemerge,
        fileset,
        revset,
        templatefilters,
        templatefuncs,
        templatekw,
    )

    # list of (objname, loadermod, loadername) tuple:
    # - objname is the name of an object in extension module,
    #   from which extra information is loaded
    # - loadermod is the module where loader is placed
    # - loadername is the name of the function,
    #   which takes (ui, extensionname, extraobj) arguments
    log('- loading extension registration objects\n')
    extraloaders = [
        ('cmdtable', commands, 'loadcmdtable'),
        ('colortable', color, 'loadcolortable'),
        ('filesetpredicate', fileset, 'loadpredicate'),
        ('internalmerge', filemerge, 'loadinternalmerge'),
        ('revsetpredicate', revset, 'loadpredicate'),
        ('templatefilter', templatefilters, 'loadfilter'),
        ('templatefunc', templatefuncs, 'loadfunction'),
        ('templatekeyword', templatekw, 'loadkeyword'),
    ]
    with util.timedcm() as stats:
        _loadextra(ui, newindex, extraloaders)
    log('> extension registration object loading took %s\n', stats)
    log('extension loading complete\n')
318 360
def _loadextra(ui, newindex, extraloaders):
    """Feed registration objects of newly loaded extensions to the loaders.

    For each extension appended to _order at or after ``newindex``, every
    (objname, loadermod, loadername) triple in ``extraloaders`` is checked
    against the module and the loader invoked for any matching attribute.
    """
    for name in _order[newindex:]:
        module = _extensions[name]
        if not module:
            continue # loading this module failed

        for objname, loadermod, loadername in extraloaders:
            extraobj = getattr(module, objname, None)
            if extraobj is not None:
                loader = getattr(loadermod, loadername)
                loader(ui, name, extraobj)
329 371
def afterloaded(extension, callback):
    '''Run the specified function after a named extension is loaded.

    If the named extension is already loaded, the callback will be called
    immediately.

    If the named extension never loads, the callback will be called after
    all extensions have been loaded.

    The callback receives the named argument ``loaded``, which is a boolean
    indicating whether the dependent extension actually loaded.
    '''

    if extension not in _extensions:
        # not (yet) loaded: queue the callback for loadall() to resolve
        _aftercallbacks.setdefault(extension, []).append(callback)
        return
    # Report loaded as False if the extension is disabled
    callback(loaded=_extensions[extension] is not None)
349 391
def bind(func, *args):
    '''Partial function application

    Returns a new function that is the partial application of args and kwargs
    to func. For example,

    f(1, 2, bar=3) === bind(f, 1)(2, bar=3)'''
    assert callable(func)
    # a plain closure (rather than functools.partial) so the result is a
    # true function object whose attributes can be freely set by callers
    def bound(*extra, **kwargs):
        return func(*(args + extra), **kwargs)
    return bound
361 403
362 404 def _updatewrapper(wrap, origfn, unboundwrapper):
363 405 '''Copy and add some useful attributes to wrapper'''
364 406 try:
365 407 wrap.__name__ = origfn.__name__
366 408 except AttributeError:
367 409 pass
368 410 wrap.__module__ = getattr(origfn, '__module__')
369 411 wrap.__doc__ = getattr(origfn, '__doc__')
370 412 wrap.__dict__.update(getattr(origfn, '__dict__', {}))
371 413 wrap._origfunc = origfn
372 414 wrap._unboundwrapper = unboundwrapper
373 415
def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
    '''Wrap the command named `command' in table

    Replace command in the command table with wrapper. The wrapped command will
    be inserted into the command table specified by the table argument.

    The wrapper will be called like

    wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Optionally append to the command synopsis and docstring, used for help.
    For example, if your extension wraps the ``bookmarks`` command to add the
    flags ``--remote`` and ``--all`` you might call this function like so:

    synopsis = ' [-a] [--remote]'
    docstring = """

    The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``)
    flags to the bookmarks command. Either flag will show the remote bookmarks
    known to the repository; ``--remote`` will also suppress the output of the
    local bookmarks.
    """

    extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks,
    synopsis, docstring)
    '''
    assert callable(wrapper)
    # findcmd may have matched an abbreviation or alias; scan the table to
    # recover the exact key that maps to this entry so we replace in place
    aliases, entry = cmdutil.findcmd(command, table)
    for alias, e in table.iteritems():
        if e is entry:
            key = alias
            break

    origfn = entry[0]
    wrap = functools.partial(util.checksignature(wrapper),
                             util.checksignature(origfn))
    _updatewrapper(wrap, origfn, wrapper)
    if docstring is not None:
        wrap.__doc__ += docstring

    newentry = list(entry)
    newentry[0] = wrap
    if synopsis is not None:
        # index 2 of a command entry is its synopsis string
        newentry[2] += synopsis
    table[key] = tuple(newentry)
    # hand the (unmodified) original entry back to the caller
    return entry
423 465
def wrapfilecache(cls, propname, wrapper):
    """Wraps a filecache property.

    These can't be wrapped using the normal wrapfunction.

    Raises AttributeError if no class in the MRO defines ``propname``.
    """
    propname = pycompat.sysstr(propname)
    assert callable(wrapper)
    # walk the MRO so a property inherited from a base class is found too
    for currcls in cls.__mro__:
        if propname in currcls.__dict__:
            origfn = currcls.__dict__[propname].func
            assert callable(origfn)
            # wrap closes over origfn; safe because we break right after,
            # so only one wrap/origfn pair is ever created here
            def wrap(*args, **kwargs):
                return wrapper(origfn, *args, **kwargs)
            currcls.__dict__[propname].func = wrap
            break

    if currcls is object:
        # object terminates every MRO: the property was never found
        raise AttributeError(r"type '%s' has no property '%s'" % (
            cls, propname))
443 485
class wrappedfunction(object):
    '''context manager for temporarily wrapping a function'''

    def __init__(self, container, funcname, wrapper):
        # container: module/class/instance holding the function
        # funcname: attribute name of the function on the container
        # wrapper: callable invoked as wrapper(orig, *args, **kwargs)
        assert callable(wrapper)
        self._container = container
        self._funcname = funcname
        self._wrapper = wrapper

    def __enter__(self):
        # install the wrapper on entry
        wrapfunction(self._container, self._funcname, self._wrapper)

    def __exit__(self, exctype, excvalue, traceback):
        # remove exactly the wrapper we installed, even when exiting on error
        unwrapfunction(self._container, self._funcname, self._wrapper)
458 500
def wrapfunction(container, funcname, wrapper):
    '''Wrap the function named funcname in container

    Replace the funcname member in the given container with the specified
    wrapper. The container is typically a module, class, or instance.

    The wrapper will be called like

    wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Wrapping methods of the repository object is not recommended since
    it conflicts with extensions that extend the repository by
    subclassing. All extensions that need to extend methods of
    localrepository should use this subclassing trick: namely,
    reposetup() should look like

    def reposetup(ui, repo):
    class myrepo(repo.__class__):
    def whatever(self, *args, **kwargs):
    [...extension stuff...]
    super(myrepo, self).whatever(*args, **kwargs)
    [...extension stuff...]

    repo.__class__ = myrepo

    In general, combining wrapfunction() with subclassing does not
    work. Since you cannot control what other extensions are loaded by
    your end users, you should play nicely with others by using the
    subclass trick.
    '''
    assert callable(wrapper)

    origfn = getattr(container, funcname)
    assert callable(origfn)
    if inspect.ismodule(container):
        # origfn is not an instance or class method. "partial" can be used.
        # "partial" won't insert a frame in traceback.
        wrap = functools.partial(wrapper, origfn)
    else:
        # "partial" cannot be safely used. Emulate its effect by using "bind".
        # The downside is one more frame in traceback.
        wrap = bind(wrapper, origfn)
    _updatewrapper(wrap, origfn, wrapper)
    setattr(container, funcname, wrap)
    # return the replaced function so callers can restore it if needed
    return origfn
507 549
def unwrapfunction(container, funcname, wrapper=None):
    '''undo wrapfunction

    If wrappers is None, undo the last wrap. Otherwise removes the wrapper
    from the chain of wrappers.

    Return the removed wrapper.
    Raise IndexError if wrapper is None and nothing to unwrap; ValueError if
    wrapper is not None but is not found in the wrapper chain.
    '''
    chain = getwrapperchain(container, funcname)
    # the last element of the chain is the original, unwrapped function
    origfn = chain.pop()
    if wrapper is None:
        wrapper = chain[0]  # newest wrapper (IndexError when chain is empty)
    chain.remove(wrapper)   # ValueError when not in the chain
    # rebuild the whole chain from the original, skipping the removed
    # wrapper; re-wrapping oldest-first preserves the remaining order
    setattr(container, funcname, origfn)
    for w in reversed(chain):
        wrapfunction(container, funcname, w)
    return wrapper
527 569
def getwrapperchain(container, funcname):
    '''get a chain of wrappers of a function

    Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]

    The wrapper functions are the ones passed to wrapfunction, whose first
    argument is origfunc.
    '''
    chain = []
    # follow the _origfunc breadcrumbs left by _updatewrapper
    current = getattr(container, funcname)
    while current:
        assert callable(current)
        # report the user-supplied wrapper, not the bound wrapping closure
        chain.append(getattr(current, '_unboundwrapper', current))
        current = getattr(current, '_origfunc', None)
    return chain
543 585
def _disabledpaths():
    '''find paths of disabled extensions. returns a dict of {name: path}'''
    import hgext
    extpath = os.path.dirname(
        os.path.abspath(pycompat.fsencode(hgext.__file__)))
    try: # might not be a filesystem path
        files = os.listdir(extpath)
    except OSError:
        return {}

    exts = {}
    for e in files:
        if e.endswith('.py'):
            # single-module extension: hgext/<name>.py
            name = e.rsplit('.', 1)[0]
            path = os.path.join(extpath, e)
        else:
            # package extension: hgext/<name>/__init__.py
            name = e
            path = os.path.join(extpath, e, '__init__.py')
        if not os.path.exists(path):
            continue
        # skip duplicates, currently enabled extensions, and the package file
        if name in exts or name in _order or name == '__init__':
            continue
        exts[name] = path
    for name, path in _disabledextensions.iteritems():
        # If no path was provided for a disabled extension (e.g. "color=!"),
        # don't replace the path we already found by the scan above.
        if path:
            exts[name] = path
    return exts
573 615
def _moduledoc(file):
    '''return the top-level python documentation for the given file

    Loosely inspired by pydoc.source_synopsis(), but rewritten to
    handle triple quotes and to return the whole text instead of just
    the synopsis

    Returns None when no docstring is found or its delimiter is unmatched.
    '''
    result = []

    # skip leading comment lines and blank lines
    line = file.readline()
    while line[:1] == '#' or not line.strip():
        line = file.readline()
        if not line:
            break

    start = line[:3]
    if start == '"""' or start == "'''":
        line = line[3:]
        while line:
            if line.rstrip().endswith(start):
                # closing delimiter: keep any text preceding it
                line = line.split(start)[0]
                if line:
                    result.append(line)
                break
            # NOTE(review): this branch looks unreachable — the while
            # condition already guarantees ``line`` is truthy
            elif not line:
                return None # unmatched delimiter
            result.append(line)
            line = file.readline()
    else:
        # first non-comment line is not a docstring opener
        return None

    return ''.join(result)
605 647
def _disabledhelp(path):
    '''retrieve help synopsis of a disabled extension (without importing)'''
    try:
        with open(path, 'rb') as src:
            doc = _moduledoc(src)
    except IOError:
        # unreadable source: report nothing rather than fail
        return
    if not doc:
        return _('(no help text available)')
    # extracting localized synopsis
    return gettext(doc)
618 660
def disabled():
    '''find disabled extensions from hgext. returns a dict of {name: desc}'''
    try:
        # frozen builds ship a generated __index__ of extension docs
        # instead of the source files; prefer it when available
        from hgext import __index__
        return dict((name, gettext(desc))
                    for name, desc in __index__.docs.iteritems()
                    if name not in _order)
    except (ImportError, AttributeError):
        pass

    paths = _disabledpaths()
    if not paths:
        return {}

    exts = {}
    for name, path in paths.iteritems():
        doc = _disabledhelp(path)
        if doc:
            # first docstring line is the one-line synopsis
            exts[name] = doc.splitlines()[0]

    return exts
640 682
def disabledext(name):
    '''find a specific disabled extension from hgext. returns desc'''
    try:
        # prefer the generated index shipped with frozen builds
        from hgext import __index__
        if name in _order: # enabled
            return
        else:
            return gettext(__index__.docs.get(name))
    except (ImportError, AttributeError):
        pass

    # fall back to scanning extension sources on disk
    paths = _disabledpaths()
    if name in paths:
        return _disabledhelp(paths[name])
655 697
656 698 def _walkcommand(node):
657 699 """Scan @command() decorators in the tree starting at node"""
658 700 todo = collections.deque([node])
659 701 while todo:
660 702 node = todo.popleft()
661 703 if not isinstance(node, ast.FunctionDef):
662 704 todo.extend(ast.iter_child_nodes(node))
663 705 continue
664 706 for d in node.decorator_list:
665 707 if not isinstance(d, ast.Call):
666 708 continue
667 709 if not isinstance(d.func, ast.Name):
668 710 continue
669 711 if d.func.id != r'command':
670 712 continue
671 713 yield d
672 714
def _disabledcmdtable(path):
    """Construct a dummy command table without loading the extension module

    This may raise IOError or SyntaxError.
    """
    with open(path, 'rb') as src:
        root = ast.parse(src.read(), path)
    cmdtable = {}
    for node in _walkcommand(root):
        if not node.args:
            continue
        # the first @command() argument is the command name
        a = node.args[0]
        if isinstance(a, ast.Str):
            name = pycompat.sysbytes(a.s)
        elif pycompat.ispy3 and isinstance(a, ast.Bytes):
            # on py3, b'...' literals parse as ast.Bytes, not ast.Str
            name = a.s
        else:
            # dynamic command name; cannot be resolved statically
            continue
        # dummy entry: no function, no options, empty synopsis
        cmdtable[name] = (None, [], b'')
    return cmdtable
693 735
def _finddisabledcmd(ui, cmd, name, path, strict):
    """Check whether the extension at ``path`` provides command ``cmd``.

    Returns (cmdname, extensionname, doc) on a match, None otherwise.
    Never raises for unreadable or unparsable extension sources.
    """
    try:
        cmdtable = _disabledcmdtable(path)
    except (IOError, SyntaxError):
        return
    try:
        aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
    except (error.AmbiguousCommand, error.UnknownCommand):
        return
    # prefer the alias that extends the requested prefix; the for/else
    # falls back to the canonical (first) alias when none does
    for c in aliases:
        if c.startswith(cmd):
            cmd = c
            break
    else:
        cmd = aliases[0]
    doc = _disabledhelp(path)
    return (cmd, name, doc)
711 753
def disabledcmd(ui, cmd, strict=False):
    '''find cmd from disabled extensions without importing.
    returns (cmdname, extname, doc)

    Raises UnknownCommand when no disabled extension provides the command.
    '''

    paths = _disabledpaths()
    if not paths:
        raise error.UnknownCommand(cmd)

    ext = None
    # first, search for an extension with the same name as the command
    path = paths.pop(cmd, None)
    if path:
        ext = _finddisabledcmd(ui, cmd, cmd, path, strict=strict)
    if not ext:
        # otherwise, interrogate each extension until there's a match
        for name, path in paths.iteritems():
            ext = _finddisabledcmd(ui, cmd, name, path, strict=strict)
            if ext:
                break
    if ext:
        return ext

    raise error.UnknownCommand(cmd)
735 777
def enabled(shortname=True):
    '''return a dict of {name: desc} of extensions'''
    exts = {}
    for ename, ext in extensions():
        # first docstring line is the one-line synopsis
        doc = gettext(ext.__doc__) or _('(no help text available)')
        key = ename.split('.')[-1] if shortname else ename
        exts[key] = doc.splitlines()[0].strip()
    return exts
746 788
def notloaded():
    '''return short names of extensions that failed to load'''
    # failed loads leave a None placeholder in _extensions
    failed = []
    for name, mod in _extensions.iteritems():
        if mod is None:
            failed.append(name)
    return failed
750 792
def moduleversion(module):
    '''return version information from given module as a string'''
    version = ''
    # a callable getversion() takes precedence over a __version__ attribute
    if (util.safehasattr(module, 'getversion')
        and callable(module.getversion)):
        version = module.getversion()
    elif util.safehasattr(module, '__version__'):
        version = module.__version__
    if isinstance(version, (list, tuple)):
        # e.g. (1, 2, 3) -> '1.2.3'
        version = '.'.join(pycompat.bytestr(o) for o in version)
    return version
763 805
def ismoduleinternal(module):
    """Return True if the module declares it ships with Mercurial core."""
    return getattr(module, 'testedwith', None) == "ships-with-hg-core"
@@ -1,1169 +1,1177
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 import functools
12 13 import hashlib
13 14 import os
14 15 import shutil
15 16 import stat
16 17
17 18 from .i18n import _
18 19 from .node import (
19 20 nullid,
20 21 )
21 22
22 23 from . import (
23 24 bookmarks,
24 25 bundlerepo,
25 26 cacheutil,
26 27 cmdutil,
27 28 destutil,
28 29 discovery,
29 30 error,
30 31 exchange,
31 32 extensions,
32 33 httppeer,
33 34 localrepo,
34 35 lock,
35 36 logcmdutil,
36 37 logexchange,
37 38 merge as mergemod,
38 39 node,
39 40 phases,
40 41 scmutil,
41 42 sshpeer,
42 43 statichttprepo,
43 44 ui as uimod,
44 45 unionrepo,
45 46 url,
46 47 util,
47 48 verify as verifymod,
48 49 vfs as vfsmod,
49 50 )
50 51
51 52 from .utils import (
52 53 stringutil,
53 54 )
54 55
55 56 release = lock.release
56 57
57 58 # shared features
58 59 sharedbookmarks = 'bookmarks'
59 60
def _local(path):
    # bundle files on disk are served by the bundle repository class;
    # anything else is a regular local repository
    fspath = util.expandpath(util.urllocalpath(path))
    if os.path.isfile(fspath):
        return bundlerepo
    return localrepo
63 64
def addbranchrevs(lrepo, other, branches, revs):
    """resolve branch names against peer ``other``

    ``branches`` is the (hashbranch, branches) pair produced by
    parseurl().  Returns (revs, checkout): ``revs`` extended with the
    resolved branch heads and ``checkout`` the revision to update to
    (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # no branch requested; pass the revs through unchanged
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        # without branchmap support, treat the fragment as a plain revision
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # append the heads of 'branch' to revs; False if branch is unknown
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            # not a branch name; assume it names a revision instead
            revs.append(hashbranch)
    return revs, revs[0]
106 107
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    if u.fragment:
        # a '#branch' suffix names a branch; it is not part of the url
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
116 117
# map of url scheme to the module (or factory) providing the repository
# or peer classes for it; paths without a scheme fall back to 'file'
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
126 127
def _peerlookup(path):
    """return the handler registered for path's url scheme"""
    handler = schemes.get(util.url(path).scheme or 'file') or schemes['file']
    try:
        return handler(path)
    except TypeError:
        # we can't test callable(handler) because 'handler' can be an
        # unloaded module that implements __call__
        if util.safehasattr(handler, 'instance'):
            return handler
        raise
139 140
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        return repo.local()
    # a path string: delegate to the scheme handler; handlers without
    # islocal() support are treated as not local
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        return False
148 149
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    u = util.url(path, parsequery=False, parsefragment=False)
    if not u.islocal():
        return url.open(ui, path)
    return util.posixfile(u.localpath(), 'rb')
156 157
# a list of (ui, repo) functions called for wire peer initialization;
# _peerorrepo() runs these on every peer that is not local
wirepeersetupfuncs = []
159 160
def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents)
    ui = getattr(obj, "ui", ui)
    # with devel.debug.extensions set, trace extension setup; otherwise
    # 'log' is a no-op so the call sites stay cheap
    if ui.configbool('devel', 'debug.extensions'):
        log = functools.partial(
            ui.debug, 'debug.extensions: ', label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    for name, module in extensions.extensions(ui):
        log('  - running reposetup for %s\n' % (name,))
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        # wire peers get the extra initialization registered in
        # wirepeersetupfuncs
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
175 183
def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                      intents=intents)
    repo = obj.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or obj.url()))
    # always hand out the 'visible' filtered view of the repository
    return repo.filtered('visible')
185 193
def peer(uiorrepo, opts, path, create=False, intents=None):
    '''return a repository peer for the specified path'''
    remui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remui, path, create, intents=intents)
    return obj.peer()
190 198
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    urlpath = util.url(source).path
    # no path component (e.g. 'http://example.org/') means there is no
    # sensible default destination
    return os.path.basename(os.path.normpath(urlpath)) if urlpath else ''
211 219
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not sharing its store with anybody
        return None

    cached = getattr(repo, 'srcrepo', None)
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    # cache on the repo object so repeated calls reuse the same instance
    repo.srcrepo = srcrepo
    return srcrepo
229 237
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    The repository created at ``dest`` has no store of its own: its
    '.hg/sharedpath' file points at the store of ``source``.  Returns
    the new repository object.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    # start from the source's requirements; a missing 'requires' file is
    # tolerated (ENOENT), anything else is re-raised
    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=stringutil.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    # re-open the destination as a real repository and finish setup
    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r
290 298
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        # keep the old pointer around as '.old' rather than deleting it
        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()
326 334
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # seed the new repo's hgrc with a default path pointing at the source
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        hgrc = util.tonativeeol(('[paths]\n'
                                 'default = %s\n') % default)
        destrepo.vfs.write('hgrc', hgrc)

    with destrepo.wlock():
        if bookmarks:
            # record that bookmarks are among the shared data
            destrepo.vfs.write('shared', sharedbookmarks + '\n')
345 353
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.

    Raises error.Abort if no update target can be resolved (previously
    this crashed with an unbound-local NameError).
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the requested revision first, then fall back to sane defaults
    uprev = None
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    if uprev is None:
        raise error.Abort(_("unable to resolve a revision to update to"))
    _update(repo, uprev)
366 374
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        # NOTE(review): hardlink is always None here, so the progress topic
        # is always 'copying' even if util.copyfiles ends up hardlinking
        topic = _('linking') if hardlink else _('copying')
        progress = ui.makeprogress(topic)
        num = 0
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo has no secret phases; its phaseroots need
            # not be copied
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                # util.copyfiles decides whether hardlinking works and
                # feeds that decision back for subsequent files
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
        else:
            ui.debug("copied %d files\n" % num)
        progress.complete()
        return destlock
    except: # re-raises
        release(destlock)
        raise
405 413
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
477 485
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.vfs.join('cache/%s' % fname)
    if not os.path.exists(src):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
488 496
def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    # auto-sharing: a pooled store may be used instead of a full clone
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        # a local, copyable, non-secret source can be cloned by copying
        # the store directly, unless specific revisions were requested
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        # success: do not delete the destination in the finally block
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # prefer the '@' bookmark, then the default branch tip,
                    # and finally plain tip
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
777 785
def _showstats(repo, stats, quietempty=False):
    """print a summary of an update's file counts to the ui"""
    if quietempty and stats.isempty():
        return
    counts = (stats.updatedcount, stats.mergedcount,
              stats.removedcount, stats.unresolvedcount)
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % counts)
785 793
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered, merged else

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    # branchmerge=False: this is a plain update, never a merge
    labels = ['working copy', 'destination']
    return mergemod.update(repo, node, False, overwrite,
                           labels=labels, updatecheck=updatecheck)
795 803
def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0

# naming conflict in clone()
_update = update
806 814
def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    # drop any leftover graft state file along with the local changes
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean
817 825
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: compute one (and the bookmark to
            # move, if any)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                # fail early on a dirty working directory, then proceed
                # without further checking
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # destination is not a bookmark: deactivate any current one
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
889 897
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        # --abort: update back to where the merge started
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
918 926
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found, otherwise the result of
    subreporecurse().
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # fetch the incoming changesets into a temporary (bundle) repo so they
    # can be displayed; cleanupfn tears that repo down afterwards
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # always remove the temporary bundle repo, even on display errors
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
949 957
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from *source*.

    Delegates the actual work to _incoming(); returns its exit code.
    """
    def subreporecurse():
        # --subrepos: recurse into each subrepo; the aggregate result
        # drops to 0 as soon as any of them has incoming changes
        result = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                result = min(result,
                             wctx.sub(subpath).incoming(ui, source, opts))
        return result

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        skipmerges = opts.get('no_merges')
        shown = 0
        for rev in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(rev)
                       if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[rev])
    return _incoming(display, subreporecurse, ui, repo, source, opts)
974 982
def _outgoing(ui, repo, dest, opts):
    """Compute the changesets that would be pushed to *dest*.

    Returns a ``(missing, other)`` pair: the list of outgoing nodes
    (possibly empty) and the destination peer.  Aborts when no push
    destination is configured.
    """
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = (path.branch, opts.get('branch') or [])

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        # resolve the revset specs to binary nodes
        revs = [repo[spec].node() for spec in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        # report why nothing is outgoing (e.g. excluded secret changesets)
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
995 1003
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination.

    Returns 0 when outgoing changes were found; otherwise the result of
    the subrepo recursion (1 when no subrepo has outgoing changes).
    """
    def recurse():
        # --subrepos: recurse into each subrepo; the aggregate result
        # drops to 0 as soon as any of them has outgoing changes
        result = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                result = min(result,
                             wctx.sub(subpath).outgoing(ui, dest, opts))
        return result

    limit = logcmdutil.getlimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        # still run the hooks so extensions see the (empty) result
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return recurse()

    if opts.get('newest_first'):
        missing.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get('no_merges')
    shown = 0
    for rev in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(rev) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[rev])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
1029 1037
def verify(repo):
    """verify the consistency of a repository

    Also follows subrepo links recorded in .hgsubstate.  Returns the
    result of verifymod.verify(), possibly replaced by a non-false
    subrepo verify result.
    """
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # recurse into the subrepo; keep any non-false
                        # result (from it or from the main verify above)
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        # subrepo missing/unreadable: warn but keep going
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                # reading .hgsubstate itself blew up: warn and continue
                # with the next revision
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
1058 1066
def remoteui(src, opts):
    """Build a ui for talking to a remote from a ui or repo plus opts.

    Repo-specific configuration is dropped; only a fixed set of
    settings (ssh command, bundle main repo root, and the auth /
    fingerprint / proxy / certificate sections) is copied over.
    """
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # ssh-related settings; command-line options win over configuration
    for opt in ('ssh', 'remotecmd'):
        value = opts.get(opt) or src.config('ui', opt)
        if value:
            dst.setconfig('ui', opt, value, 'copied')

    # remember the main repository root for bundle repos
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root, 'copied')

    # these sections are copied wholesale to the remote ui
    for section in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
1087 1095
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is (repo attribute naming the containing directory, file name
# relative to it).  Entries marked '!' can change content without changing
# size, so the size+mtime fingerprint may miss some updates to them.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
1096 1104
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # fingerprint of the files of interest (see foi) plus the newest
        # mtime observed; fetch() compares against this to detect changes
        self._state, self.mtime = self._repostate()
        # remember the filter name so a refreshed repo keeps the same view
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        # stale: build a brand new repository object and re-apply the
        # original filter (or lack thereof)
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        # Build the change fingerprint: a tuple of (mtime, size) pairs for
        # each file of interest, plus the most recent mtime seen.
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # the file may not exist (e.g. no obsstore yet); fall back
                # to the containing directory so we still get a stamp
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # share the fingerprint so the copy doesn't immediately refresh
        c._state = self._state
        c.mtime = self.mtime
        return c
@@ -1,100 +1,131
1 1 ensure that failing ui.atexit handlers report sensibly
2 2
3 3 $ cat > $TESTTMP/bailatexit.py <<EOF
4 4 > from mercurial import util
5 5 > def bail():
6 6 > raise RuntimeError('ui.atexit handler exception')
7 7 >
8 8 > def extsetup(ui):
9 9 > ui.atexit(bail)
10 10 > EOF
11 11 $ hg -q --config extensions.bailatexit=$TESTTMP/bailatexit.py \
12 12 > help help
13 13 hg help [-ecks] [TOPIC]
14 14
15 15 show help for a given topic or a help overview
16 16 error in exit handlers:
17 17 Traceback (most recent call last):
18 18 File "*/mercurial/dispatch.py", line *, in _runexithandlers (glob)
19 19 func(*args, **kwargs)
20 20 File "$TESTTMP/bailatexit.py", line *, in bail (glob)
21 21 raise RuntimeError('ui.atexit handler exception')
22 22 RuntimeError: ui.atexit handler exception
23 23 [255]
24 24
25 25 $ rm $TESTTMP/bailatexit.py
26 26
27 27 another bad extension
28 28
29 29 $ echo 'raise Exception("bit bucket overflow")' > badext.py
30 30 $ abspathexc=`pwd`/badext.py
31 31
32 32 $ cat >baddocext.py <<EOF
33 33 > """
34 34 > baddocext is bad
35 35 > """
36 36 > EOF
37 37 $ abspathdoc=`pwd`/baddocext.py
38 38
39 39 $ cat <<EOF >> $HGRCPATH
40 40 > [extensions]
41 41 > gpg =
42 42 > hgext.gpg =
43 43 > badext = $abspathexc
44 44 > baddocext = $abspathdoc
45 45 > badext2 =
46 46 > EOF
47 47
48 48 $ hg -q help help 2>&1 |grep extension
49 49 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
50 50 *** failed to import extension badext2: No module named badext2
51 51
52 52 show traceback
53 53
54 54 $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError'
55 55 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
56 56 Traceback (most recent call last):
57 57 Exception: bit bucket overflow
58 58 *** failed to import extension badext2: No module named badext2
59 59 Traceback (most recent call last):
60 60 ImportError: No module named badext2
61 61
62 62 names of extensions failed to load can be accessed via extensions.notloaded()
63 63
64 64 $ cat <<EOF > showbadexts.py
65 65 > from mercurial import commands, extensions, registrar
66 66 > cmdtable = {}
67 67 > command = registrar.command(cmdtable)
68 68 > @command(b'showbadexts', norepo=True)
69 69 > def showbadexts(ui, *pats, **opts):
70 70 > ui.write('BADEXTS: %s\n' % ' '.join(sorted(extensions.notloaded())))
71 71 > EOF
72 72 $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
73 73 BADEXTS: badext badext2
74 74
75 75 show traceback for ImportError of hgext.name if devel.debug.extensions is set
76 76
77 77 $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
78 78 > | grep -v '^ ' \
79 79 > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import'
80 debug.extensions: loading extensions
81 debug.extensions: - processing 5 entries
82 debug.extensions: - loading extension: 'gpg'
83 debug.extensions: > 'gpg' extension loaded in * (glob)
84 debug.extensions: - validating extension tables: 'gpg'
85 debug.extensions: - invoking registered callbacks: 'gpg'
86 debug.extensions: > callbacks completed in * (glob)
87 debug.extensions: - loading extension: 'badext'
80 88 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
81 89 Traceback (most recent call last):
82 90 Exception: bit bucket overflow
83 could not import hgext.badext2 (No module named *badext2): trying hgext3rd.badext2 (glob)
91 debug.extensions: - loading extension: 'baddocext'
92 debug.extensions: > 'baddocext' extension loaded in * (glob)
93 debug.extensions: - validating extension tables: 'baddocext'
94 debug.extensions: - invoking registered callbacks: 'baddocext'
95 debug.extensions: > callbacks completed in * (glob)
96 debug.extensions: - loading extension: 'badext2'
97 debug.extensions: - could not import hgext.badext2 (No module named badext2): trying hgext3rd.badext2
84 98 Traceback (most recent call last):
85 99 ImportError: No module named *badext2 (glob)
86 could not import hgext3rd.badext2 (No module named *badext2): trying badext2 (glob)
100 debug.extensions: - could not import hgext3rd.badext2 (No module named badext2): trying badext2
87 101 Traceback (most recent call last):
88 102 ImportError: No module named *badext2 (glob)
89 103 *** failed to import extension badext2: No module named badext2
90 104 Traceback (most recent call last):
91 105 ImportError: No module named badext2
106 debug.extensions: > loaded 2 extensions, total time * (glob)
107 debug.extensions: - loading configtable attributes
108 debug.extensions: - executing uisetup hooks
109 debug.extensions: - running uisetup for 'gpg'
110 debug.extensions: > uisetup for 'gpg' took * (glob)
111 debug.extensions: - running uisetup for 'baddocext'
112 debug.extensions: > uisetup for 'baddocext' took * (glob)
113 debug.extensions: - executing extsetup hooks
114 debug.extensions: - running extsetup for 'gpg'
115 debug.extensions: > extsetup for 'gpg' took * (glob)
116 debug.extensions: - running extsetup for 'baddocext'
117 debug.extensions: > extsetup for 'baddocext' took * (glob)
118 debug.extensions: - executing remaining aftercallbacks
119 debug.extensions: > remaining aftercallbacks completed in * (glob)
120 debug.extensions: - loading extension registration objects
121 debug.extensions: > extension registration object loading took * (glob)
122 debug.extensions: extension loading complete
92 123
93 124 confirm that there's no crash when an extension's documentation is bad
94 125
95 126 $ hg help --keyword baddocext
96 127 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
97 128 *** failed to import extension badext2: No module named badext2
98 129 Topics:
99 130
100 131 extensions Using Additional Features
General Comments 0
You need to be logged in to leave comments. Login now