##// END OF EJS Templates
extensions: use ui.log() interface to provide detailed loading information...
Yuya Nishihara -
r41032:6f2510b5 default
parent child Browse files
Show More
@@ -1,843 +1,844 b''
1 1 # extensions.py - extension handling for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import ast
11 11 import collections
12 12 import functools
13 13 import imp
14 14 import inspect
15 15 import os
16 16
17 17 from .i18n import (
18 18 _,
19 19 gettext,
20 20 )
21 21
22 22 from . import (
23 23 cmdutil,
24 24 configitems,
25 25 error,
26 26 pycompat,
27 27 util,
28 28 )
29 29
30 30 from .utils import (
31 31 stringutil,
32 32 )
33 33
# Module-level registry of extension state.
_extensions = {}          # shortname -> module, or None if loading failed
_disabledextensions = {}  # shortname -> path for extensions disabled via '!'
_aftercallbacks = {}      # shortname -> [callback(loaded=bool), ...]
_order = []               # shortnames in successful load order
# names load() skips without importing anything (functionality is built in
# or long gone); load() returns None for these
_builtin = {
    'hbisect',
    'bookmarks',
    'color',
    'parentrevspec',
    'progress',
    'interhg',
    'inotify',
    'hgcia'
}
48 48
def extensions(ui=None):
    """Yield (name, module) for each loaded, enabled extension.

    When a ui is given, an extension is considered enabled if its
    'extensions' config entry (under either '<name>' or 'hgext.<name>')
    is set and does not start with '!'; without a ui, every successfully
    loaded extension is yielded.
    """
    if not ui:
        enabled = lambda name: True
    else:
        def enabled(name):
            for fmt in ['%s', 'hgext.%s']:
                conf = ui.config('extensions', fmt % name)
                if conf is not None and not conf.startswith('!'):
                    return True
    for name in _order:
        mod = _extensions[name]
        if mod and enabled(name):
            yield name, mod
62 62
def find(name):
    '''return module with given extension name'''
    if name in _extensions:
        mod = _extensions[name]
    else:
        # fall back to suffix matching for dotted or path-like names
        mod = None
        for k, v in _extensions.iteritems():
            if k.endswith('.' + name) or k.endswith('/' + name):
                mod = v
                break
    if not mod:
        raise KeyError(name)
    return mod
76 76
def loadpath(path, module_name):
    """Load the extension source at `path` as `module_name` and return it.

    Handles both a package directory (module/__init__.py style) and a
    single .py file.  An IOError raised while reading a single file gets
    its `filename` attribute filled in before being re-raised.
    """
    # dots in the name would be treated as package separators by imp
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    module_name = pycompat.fsdecode(module_name)
    path = pycompat.fsdecode(path)
    if os.path.isdir(path):
        # module/__init__.py style
        d, f = os.path.split(path)
        fd, fpath, desc = imp.find_module(f, [d])
        return imp.load_module(module_name, fd, fpath, desc)
    else:
        try:
            return imp.load_source(module_name, path)
        except IOError as exc:
            if not exc.filename:
                exc.filename = path # python does not fill this
            raise
94 94
def _importh(name):
    """import and return the <name> module"""
    # __import__ returns the top-level package; walk down the dotted
    # path to reach the requested submodule
    mod = __import__(pycompat.sysstr(name))
    for part in name.split('.')[1:]:
        mod = getattr(mod, part)
    return mod
102 102
def _importext(name, path=None, reportfunc=None):
    """Import extension <name> and return its module.

    With a path, the file/directory at path is loaded directly.
    Otherwise the 'hgext.' and 'hgext3rd.' prefixed module names are
    tried before the bare name; reportfunc (if given) is called with
    (err, failedname, nextname) before each fallback.
    """
    if path:
        # the module will be loaded in sys.modules
        # choose an unique name so that it doesn't
        # conflicts with other modules
        return loadpath(path, 'hgext.%s' % name)
    try:
        return _importh("hgext.%s" % name)
    except ImportError as err:
        if reportfunc:
            reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
    try:
        return _importh("hgext3rd.%s" % name)
    except ImportError as err:
        if reportfunc:
            reportfunc(err, "hgext3rd.%s" % name, name)
    return _importh(name)
122 122
def _reportimporterror(ui, err, failed, next):
    """Log that importing `failed` raised `err` and `next` will be tried.

    The source span interleaved the pre- and post-change variants of this
    function (a corrupted diff); this is the post-change, ui.log()-based
    version.
    """
    # note: this ui.log happens before --debug is processed,
    # Use --config ui.debug=1 to see them.
    ui.log(b'extension', b'    - could not import %s (%s): trying %s\n',
           failed, stringutil.forcebytestr(err), next)
    if ui.debugflag and ui.configbool('devel', 'debug.extensions'):
        ui.traceback()
131 130
132 131 def _rejectunicode(name, xs):
133 132 if isinstance(xs, (list, set, tuple)):
134 133 for x in xs:
135 134 _rejectunicode(name, x)
136 135 elif isinstance(xs, dict):
137 136 for k, v in xs.items():
138 137 _rejectunicode(name, k)
139 138 _rejectunicode(b'%s.%s' % (name, stringutil.forcebytestr(k)), v)
140 139 elif isinstance(xs, type(u'')):
141 140 raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
142 141 hint="use b'' to make it byte string")
143 142
# attributes set by registrar.command; _validatecmdtable() requires every
# command function to carry all of them
_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')
146 145
def _validatecmdtable(ui, cmdtable):
    """Check if extension commands have required attributes"""
    for cmd, entry in cmdtable.iteritems():
        func = entry[0]
        missing = [attr for attr in _cmdfuncattrs
                   if not util.safehasattr(func, attr)]
        if missing:
            raise error.ProgrammingError(
                'missing attributes: %s' % ', '.join(missing),
                hint="use @command decorator to register '%s'" % cmd)
157 156
def _validatetables(ui, mod):
    """Sanity check for loadable tables provided by extension module"""
    # plain dict tables: reject unicode anywhere inside
    for tname in ('cmdtable', 'colortable', 'configtable'):
        _rejectunicode(tname, getattr(mod, tname, {}))
    # registrar-style tables expose their entries via _table
    registrars = ('filesetpredicate', 'internalmerge', 'revsetpredicate',
                  'templatefilter', 'templatefunc', 'templatekeyword')
    for tname in registrars:
        registrar = getattr(mod, tname, None)
        if registrar:
            _rejectunicode(tname, registrar._table)
    _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
168 167
def load(ui, name, path, loadingtime=None):
    """Load extension `name` (from `path` if given) and return its module.

    Returns None for built-in names, and the cached module when already
    loaded.  When `loadingtime` (a shortname -> elapsed-time mapping) is
    given, the import time is accumulated into it.

    The source span interleaved the pre- and post-change variants of this
    function (a corrupted diff); this is the post-change, ui.log()-based
    version with the old `log` callback parameter dropped.
    """
    if name.startswith('hgext.') or name.startswith('hgext/'):
        shortname = name[6:]
    else:
        shortname = name
    if shortname in _builtin:
        return None
    if shortname in _extensions:
        return _extensions[shortname]
    ui.log(b'extension', b'  - loading extension: %s\n', shortname)
    # mark as "attempted" so a failed import is not retried
    _extensions[shortname] = None
    with util.timedcm('load extension %s', shortname) as stats:
        mod = _importext(name, path, bind(_reportimporterror, ui))
    ui.log(b'extension', b'  > %s extension loaded in %s\n', shortname, stats)
    if loadingtime is not None:
        loadingtime[shortname] += stats.elapsed

    # Before we do anything with the extension, check against minimum stated
    # compatibility. This gives extension authors a mechanism to have their
    # extensions short circuit when loaded with a known incompatible version
    # of Mercurial.
    minver = getattr(mod, 'minimumhgversion', None)
    if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
        msg = _('(third party extension %s requires version %s or newer '
                'of Mercurial (current: %s); disabling)\n')
        ui.warn(msg % (shortname, minver, util.version()))
        return
    ui.log(b'extension', b'    - validating extension tables: %s\n',
           shortname)
    _validatetables(ui, mod)

    _extensions[shortname] = mod
    _order.append(shortname)
    ui.log(b'extension', b'    - invoking registered callbacks: %s\n',
           shortname)
    with util.timedcm('callbacks extension %s', shortname) as stats:
        for fn in _aftercallbacks.get(shortname, []):
            fn(loaded=True)
    ui.log(b'extension', b'    > callbacks completed in %s\n', stats)
    return mod
207 207
def _runuisetup(name, ui):
    """Run the extension's uisetup hook; return False if it raised."""
    uisetup = getattr(_extensions[name], 'uisetup', None)
    if not uisetup:
        return True
    try:
        uisetup(ui)
    except Exception as inst:
        ui.traceback(force=True)
        msg = stringutil.forcebytestr(inst)
        ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
        return False
    return True
219 219
def _runextsetup(name, ui):
    """Run the extension's extsetup hook; return False if it raised."""
    extsetup = getattr(_extensions[name], 'extsetup', None)
    if not extsetup:
        return True
    try:
        try:
            extsetup(ui)
        except TypeError:
            # only swallow the TypeError when it comes from an extsetup
            # that does not accept a ui argument (legacy signature)
            if pycompat.getargspec(extsetup).args:
                raise
            extsetup() # old extsetup with no ui argument
    except Exception as inst:
        ui.traceback(force=True)
        msg = stringutil.forcebytestr(inst)
        ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
        return False
    return True
236 236
def loadall(ui, whitelist=None):
    """Load every extension enabled in ui's config (restricted to
    `whitelist` when given), then run uisetup/extsetup hooks and register
    extension-provided tables.

    The source span interleaved the pre- and post-change variants of this
    function (a corrupted diff); this is the post-change, ui.log()-based
    version with the local `log` lambda removed.
    """
    loadingtime = collections.defaultdict(int)
    result = ui.configitems("extensions")
    if whitelist is not None:
        result = [(k, v) for (k, v) in result if k in whitelist]
    newindex = len(_order)
    ui.log(b'extension', b'loading %sextensions\n',
           'additional ' if newindex else '')
    ui.log(b'extension', b'- processing %d entries\n', len(result))
    with util.timedcm('load all extensions') as stats:
        for (name, path) in result:
            if path:
                if path[0:1] == '!':
                    if name not in _disabledextensions:
                        ui.log(b'extension',
                               b'  - skipping disabled extension: %s\n', name)
                    _disabledextensions[name] = path[1:]
                    continue
            try:
                load(ui, name, path, loadingtime)
            except Exception as inst:
                msg = stringutil.forcebytestr(inst)
                if path:
                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
                            % (name, path, msg))
                else:
                    ui.warn(_("*** failed to import extension %s: %s\n")
                            % (name, msg))
                if isinstance(inst, error.Hint) and inst.hint:
                    ui.warn(_("*** (%s)\n") % inst.hint)
                ui.traceback()

    ui.log(b'extension', b'> loaded %d extensions, total time %s\n',
           len(_order) - newindex, stats)
    # list of (objname, loadermod, loadername) tuple:
    # - objname is the name of an object in extension module,
    #   from which extra information is loaded
    # - loadermod is the module where loader is placed
    # - loadername is the name of the function,
    #   which takes (ui, extensionname, extraobj) arguments
    #
    # This one is for the list of item that must be run before running any setup
    earlyextraloaders = [
        ('configtable', configitems, 'loadconfigtable'),
    ]

    ui.log(b'extension', b'- loading configtable attributes\n')
    _loadextra(ui, newindex, earlyextraloaders)

    broken = set()
    ui.log(b'extension', b'- executing uisetup hooks\n')
    with util.timedcm('all uisetup') as alluisetupstats:
        for name in _order[newindex:]:
            ui.log(b'extension', b'  - running uisetup for %s\n', name)
            with util.timedcm('uisetup %s', name) as stats:
                if not _runuisetup(name, ui):
                    ui.log(b'extension',
                           b'    - the %s extension uisetup failed\n', name)
                    broken.add(name)
            ui.log(b'extension', b'  > uisetup for %s took %s\n', name, stats)
            loadingtime[name] += stats.elapsed
    ui.log(b'extension', b'> all uisetup took %s\n', alluisetupstats)

    ui.log(b'extension', b'- executing extsetup hooks\n')
    with util.timedcm('all extsetup') as allextetupstats:
        for name in _order[newindex:]:
            if name in broken:
                continue
            ui.log(b'extension', b'  - running extsetup for %s\n', name)
            with util.timedcm('extsetup %s', name) as stats:
                if not _runextsetup(name, ui):
                    ui.log(b'extension',
                           b'    - the %s extension extsetup failed\n', name)
                    broken.add(name)
            ui.log(b'extension', b'  > extsetup for %s took %s\n', name, stats)
            loadingtime[name] += stats.elapsed
    ui.log(b'extension', b'> all extsetup took %s\n', allextetupstats)

    for name in broken:
        ui.log(b'extension', b'    - disabling broken %s extension\n', name)
        _extensions[name] = None

    # Call aftercallbacks that were never met.
    ui.log(b'extension', b'- executing remaining aftercallbacks\n')
    with util.timedcm('aftercallbacks') as stats:
        for shortname in _aftercallbacks:
            if shortname in _extensions:
                continue

            for fn in _aftercallbacks[shortname]:
                ui.log(b'extension',
                       b'  - extension %s not loaded, notify callbacks\n',
                       shortname)
                fn(loaded=False)
    ui.log(b'extension', b'> remaining aftercallbacks completed in %s\n', stats)

    # loadall() is called multiple times and lingering _aftercallbacks
    # entries could result in double execution. See issue4646.
    _aftercallbacks.clear()

    # delay importing avoids cyclic dependency (especially commands)
    from . import (
        color,
        commands,
        filemerge,
        fileset,
        revset,
        templatefilters,
        templatefuncs,
        templatekw,
    )

    # list of (objname, loadermod, loadername) tuple:
    # - objname is the name of an object in extension module,
    #   from which extra information is loaded
    # - loadermod is the module where loader is placed
    # - loadername is the name of the function,
    #   which takes (ui, extensionname, extraobj) arguments
    ui.log(b'extension', b'- loading extension registration objects\n')
    extraloaders = [
        ('cmdtable', commands, 'loadcmdtable'),
        ('colortable', color, 'loadcolortable'),
        ('filesetpredicate', fileset, 'loadpredicate'),
        ('internalmerge', filemerge, 'loadinternalmerge'),
        ('revsetpredicate', revset, 'loadpredicate'),
        ('templatefilter', templatefilters, 'loadfilter'),
        ('templatefunc', templatefuncs, 'loadfunction'),
        ('templatekeyword', templatekw, 'loadkeyword'),
    ]
    with util.timedcm('load registration objects') as stats:
        _loadextra(ui, newindex, extraloaders)
    ui.log(b'extension', b'> extension registration object loading took %s\n',
           stats)

    # Report per extension loading time (except reposetup)
    for name in sorted(loadingtime):
        ui.log(b'extension', b'> extension %s take a total of %s to load\n',
               name, util.timecount(loadingtime[name]))

    ui.log(b'extension', b'extension loading complete\n')
376 377
def _loadextra(ui, newindex, extraloaders):
    """Invoke registered loaders for extra objects of newly loaded extensions.

    extraloaders is a list of (objname, loadermod, loadername) tuples; for
    each extension loaded at or after `newindex`, any module attribute
    named objname is passed to loadermod.loadername(ui, extname, obj).
    """
    for name in _order[newindex:]:
        module = _extensions[name]
        if not module:
            # loading this module failed
            continue
        for objname, loadermod, loadername in extraloaders:
            extraobj = getattr(module, objname, None)
            if extraobj is None:
                continue
            getattr(loadermod, loadername)(ui, name, extraobj)
387 388
def afterloaded(extension, callback):
    '''Run the specified function after a named extension is loaded.

    If the named extension is already loaded, the callback will be called
    immediately.

    If the named extension never loads, the callback will be called after
    all extensions have been loaded.

    The callback receives the named argument ``loaded``, which is a boolean
    indicating whether the dependent extension actually loaded.
    '''
    if extension not in _extensions:
        _aftercallbacks.setdefault(extension, []).append(callback)
        return
    # Report loaded as False if the extension is disabled
    callback(loaded=_extensions[extension] is not None)
407 408
def populateui(ui):
    """Run extension hooks on the given ui to populate additional members,
    extend the class dynamically, etc.

    This will be called after the configuration is loaded, and/or extensions
    are loaded. In general, it's once per ui instance, but in command-server
    and hgweb, this may be called more than once with the same ui.
    """
    for name, mod in extensions(ui):
        hook = getattr(mod, 'uipopulate', None)
        if not hook:
            continue
        try:
            hook(ui)
        except Exception as inst:
            # a broken hook must not take the whole ui down with it
            ui.traceback(force=True)
            ui.warn(_('*** failed to populate ui by extension %s: %s\n')
                    % (name, stringutil.forcebytestr(inst)))
426 427
def bind(func, *args):
    '''Partial function application

    Returns a new function that is the partial application of args and kwargs
    to func. For example,

    f(1, 2, bar=3) === bind(f, 1)(2, bar=3)'''
    assert callable(func)
    def bound(*extra, **kwargs):
        return func(*(args + extra), **kwargs)
    return bound
438 439
def _updatewrapper(wrap, origfn, unboundwrapper):
    '''Copy and add some useful attributes to wrapper'''
    try:
        wrap.__name__ = origfn.__name__
    except AttributeError:
        # origfn may not expose a (writable) __name__; skip silently
        pass
    wrap.__module__ = getattr(origfn, '__module__')
    wrap.__doc__ = getattr(origfn, '__doc__')
    wrap.__dict__.update(getattr(origfn, '__dict__', {}))
    # bookkeeping consumed by getwrapperchain()/unwrapfunction()
    wrap._origfunc = origfn
    wrap._unboundwrapper = unboundwrapper
450 451
def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
    '''Wrap the command named `command' in table

    Replace command in the command table with wrapper. The wrapped command will
    be inserted into the command table specified by the table argument.

    The wrapper will be called like

    wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Optionally append to the command synopsis and docstring, used for help.
    For example, if your extension wraps the ``bookmarks`` command to add the
    flags ``--remote`` and ``--all`` you might call this function like so:

    synopsis = ' [-a] [--remote]'
    docstring = """

    The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``)
    flags to the bookmarks command. Either flag will show the remote bookmarks
    known to the repository; ``--remote`` will also suppress the output of the
    local bookmarks.
    """

    extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks,
                           synopsis, docstring)
    '''
    assert callable(wrapper)
    # findcmd resolves aliases; scan the table for the exact key the entry
    # is registered under so it can be replaced in place
    aliases, entry = cmdutil.findcmd(command, table)
    for alias, e in table.iteritems():
        if e is entry:
            key = alias
            break

    origfn = entry[0]
    wrap = functools.partial(util.checksignature(wrapper),
                             util.checksignature(origfn))
    _updatewrapper(wrap, origfn, wrapper)
    if docstring is not None:
        wrap.__doc__ += docstring

    # rebuild the entry tuple with the wrapped function (and extended
    # synopsis, if requested)
    newentry = list(entry)
    newentry[0] = wrap
    if synopsis is not None:
        newentry[2] += synopsis
    table[key] = tuple(newentry)
    return entry
500 501
def wrapfilecache(cls, propname, wrapper):
    """Wraps a filecache property.

    These can't be wrapped using the normal wrapfunction.
    """
    propname = pycompat.sysstr(propname)
    assert callable(wrapper)
    # walk the MRO to find the class that actually defines the property,
    # and patch its descriptor's underlying function in place
    for currcls in cls.__mro__:
        if propname in currcls.__dict__:
            origfn = currcls.__dict__[propname].func
            assert callable(origfn)
            def wrap(*args, **kwargs):
                return wrapper(origfn, *args, **kwargs)
            currcls.__dict__[propname].func = wrap
            break

    # the loop ends with currcls == object when no class defined the
    # property (object.__dict__ never holds it)
    if currcls is object:
        raise AttributeError(r"type '%s' has no property '%s'" % (
            cls, propname))
520 521
class wrappedfunction(object):
    '''context manager for temporarily wrapping a function'''

    def __init__(self, container, funcname, wrapper):
        assert callable(wrapper)
        self._container = container
        self._funcname = funcname
        self._wrapper = wrapper

    def __enter__(self):
        # install the wrapper on entry...
        wrapfunction(self._container, self._funcname, self._wrapper)

    def __exit__(self, exctype, excvalue, traceback):
        # ...and remove exactly this wrapper on exit, even if others were
        # added in the meantime
        unwrapfunction(self._container, self._funcname, self._wrapper)
535 536
def wrapfunction(container, funcname, wrapper):
    '''Wrap the function named funcname in container

    Replace the funcname member in the given container with the specified
    wrapper. The container is typically a module, class, or instance.

    The wrapper will be called like

    wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Wrapping methods of the repository object is not recommended since
    it conflicts with extensions that extend the repository by
    subclassing. All extensions that need to extend methods of
    localrepository should use this subclassing trick: namely,
    reposetup() should look like

    def reposetup(ui, repo):
        class myrepo(repo.__class__):
            def whatever(self, *args, **kwargs):
                [...extension stuff...]
                super(myrepo, self).whatever(*args, **kwargs)
                [...extension stuff...]

        repo.__class__ = myrepo

    In general, combining wrapfunction() with subclassing does not
    work. Since you cannot control what other extensions are loaded by
    your end users, you should play nicely with others by using the
    subclass trick.
    '''
    assert callable(wrapper)

    origfn = getattr(container, funcname)
    assert callable(origfn)
    if inspect.ismodule(container):
        # origfn is not an instance or class method. "partial" can be used.
        # "partial" won't insert a frame in traceback.
        wrap = functools.partial(wrapper, origfn)
    else:
        # "partial" cannot be safely used. Emulate its effect by using "bind".
        # The downside is one more frame in traceback.
        wrap = bind(wrapper, origfn)
    # record chain metadata (_origfunc/_unboundwrapper) so the wrap can be
    # undone later by unwrapfunction()
    _updatewrapper(wrap, origfn, wrapper)
    setattr(container, funcname, wrap)
    return origfn
584 585
def unwrapfunction(container, funcname, wrapper=None):
    '''undo wrapfunction

    If wrappers is None, undo the last wrap. Otherwise removes the wrapper
    from the chain of wrappers.

    Return the removed wrapper.
    Raise IndexError if wrapper is None and nothing to unwrap; ValueError if
    wrapper is not None but is not found in the wrapper chain.
    '''
    chain = getwrapperchain(container, funcname)
    # last element is the original function
    origfn = chain.pop()
    if wrapper is None:
        wrapper = chain[0]
    chain.remove(wrapper)
    # restore the original, then re-apply the surviving wrappers oldest-first
    setattr(container, funcname, origfn)
    for w in reversed(chain):
        wrapfunction(container, funcname, w)
    return wrapper
604 605
def getwrapperchain(container, funcname):
    '''get a chain of wrappers of a function

    Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]

    The wrapper functions are the ones passed to wrapfunction, whose first
    argument is origfunc.
    '''
    chain = []
    fn = getattr(container, funcname)
    # follow the _origfunc links laid down by _updatewrapper(); a function
    # without _unboundwrapper is the original (or was not wrapped here)
    while fn:
        assert callable(fn)
        chain.append(getattr(fn, '_unboundwrapper', fn))
        fn = getattr(fn, '_origfunc', None)
    return chain
620 621
def _disabledpaths():
    '''find paths of disabled extensions. returns a dict of {name: path}'''
    import hgext
    extpath = os.path.dirname(
        os.path.abspath(pycompat.fsencode(hgext.__file__)))
    try: # might not be a filesystem path
        files = os.listdir(extpath)
    except OSError:
        return {}

    exts = {}
    for e in files:
        if e.endswith('.py'):
            # single-file extension
            name = e.rsplit('.', 1)[0]
            path = os.path.join(extpath, e)
        else:
            # assume a package directory; its entry point is __init__.py
            name = e
            path = os.path.join(extpath, e, '__init__.py')
        if not os.path.exists(path):
            continue
        # skip duplicates, already-enabled extensions and the package marker
        if name in exts or name in _order or name == '__init__':
            continue
        exts[name] = path
    for name, path in _disabledextensions.iteritems():
        # If no path was provided for a disabled extension (e.g. "color=!"),
        # don't replace the path we already found by the scan above.
        if path:
            exts[name] = path
    return exts
650 651
651 652 def _moduledoc(file):
652 653 '''return the top-level python documentation for the given file
653 654
654 655 Loosely inspired by pydoc.source_synopsis(), but rewritten to
655 656 handle triple quotes and to return the whole text instead of just
656 657 the synopsis'''
657 658 result = []
658 659
659 660 line = file.readline()
660 661 while line[:1] == '#' or not line.strip():
661 662 line = file.readline()
662 663 if not line:
663 664 break
664 665
665 666 start = line[:3]
666 667 if start == '"""' or start == "'''":
667 668 line = line[3:]
668 669 while line:
669 670 if line.rstrip().endswith(start):
670 671 line = line.split(start)[0]
671 672 if line:
672 673 result.append(line)
673 674 break
674 675 elif not line:
675 676 return None # unmatched delimiter
676 677 result.append(line)
677 678 line = file.readline()
678 679 else:
679 680 return None
680 681
681 682 return ''.join(result)
682 683
def _disabledhelp(path):
    '''retrieve help synopsis of a disabled extension (without importing)'''
    try:
        with open(path, 'rb') as src:
            doc = _moduledoc(src)
    except IOError:
        return

    if not doc:
        return _('(no help text available)')
    # extracting localized synopsis
    return gettext(doc)
695 696
def disabled():
    '''find disabled extensions from hgext. returns a dict of {name: desc}'''
    try:
        # the generated __index__ module provides names and synopses
        # without touching the filesystem
        from hgext import __index__
        return dict((name, gettext(desc))
                    for name, desc in __index__.docs.iteritems()
                    if name not in _order)
    except (ImportError, AttributeError):
        pass

    paths = _disabledpaths()
    if not paths:
        return {}

    exts = {}
    for name, path in paths.iteritems():
        doc = _disabledhelp(path)
        if doc:
            # the synopsis is the first line of the module docstring
            exts[name] = doc.splitlines()[0]

    return exts
717 718
def disabledext(name):
    '''find a specific disabled extension from hgext. returns desc'''
    try:
        # prefer the generated __index__ module over a filesystem scan
        from hgext import __index__
        if name in _order: # enabled
            return
        else:
            return gettext(__index__.docs.get(name))
    except (ImportError, AttributeError):
        pass

    paths = _disabledpaths()
    if name in paths:
        return _disabledhelp(paths[name])
732 733
733 734 def _walkcommand(node):
734 735 """Scan @command() decorators in the tree starting at node"""
735 736 todo = collections.deque([node])
736 737 while todo:
737 738 node = todo.popleft()
738 739 if not isinstance(node, ast.FunctionDef):
739 740 todo.extend(ast.iter_child_nodes(node))
740 741 continue
741 742 for d in node.decorator_list:
742 743 if not isinstance(d, ast.Call):
743 744 continue
744 745 if not isinstance(d.func, ast.Name):
745 746 continue
746 747 if d.func.id != r'command':
747 748 continue
748 749 yield d
749 750
def _disabledcmdtable(path):
    """Construct a dummy command table without loading the extension module

    This may raise IOError or SyntaxError.
    """
    with open(path, 'rb') as src:
        root = ast.parse(src.read(), path)
    cmdtable = {}
    # the first argument of each @command() decorator is the command name;
    # the dummy entry carries no function, options or synopsis
    for node in _walkcommand(root):
        if not node.args:
            continue
        a = node.args[0]
        if isinstance(a, ast.Str):
            name = pycompat.sysbytes(a.s)
        elif pycompat.ispy3 and isinstance(a, ast.Bytes):
            name = a.s
        else:
            # not a literal string; cannot determine the name statically
            continue
        cmdtable[name] = (None, [], b'')
    return cmdtable
770 771
def _finddisabledcmd(ui, cmd, name, path, strict):
    """Look up `cmd` in one disabled extension at `path`.

    Returns (cmdname, extname, doc) on a match, None otherwise.
    """
    try:
        cmdtable = _disabledcmdtable(path)
    except (IOError, SyntaxError):
        return
    try:
        aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
    except (error.AmbiguousCommand, error.UnknownCommand):
        return
    # prefer the alias that extends what the user typed
    for alias in aliases:
        if alias.startswith(cmd):
            cmd = alias
            break
    else:
        cmd = aliases[0]
    doc = _disabledhelp(path)
    return (cmd, name, doc)
788 789
def disabledcmd(ui, cmd, strict=False):
    '''find cmd from disabled extensions without importing.
    returns (cmdname, extname, doc)'''

    paths = _disabledpaths()
    if not paths:
        raise error.UnknownCommand(cmd)

    ext = None
    # first, search for an extension with the same name as the command
    path = paths.pop(cmd, None)
    if path:
        ext = _finddisabledcmd(ui, cmd, cmd, path, strict=strict)
    if not ext:
        # otherwise, interrogate each extension until there's a match
        for name, path in paths.iteritems():
            ext = _finddisabledcmd(ui, cmd, name, path, strict=strict)
            if ext:
                break
    if ext:
        return ext

    # no disabled extension provides this command
    raise error.UnknownCommand(cmd)
812 813
def enabled(shortname=True):
    '''return a dict of {name: desc} of extensions'''
    exts = {}
    for ename, ext in extensions():
        # first docstring line is the synopsis
        doc = gettext(ext.__doc__) or _('(no help text available)')
        key = ename.split('.')[-1] if shortname else ename
        exts[key] = doc.splitlines()[0].strip()
    return exts
823 824
def notloaded():
    '''return short names of extensions that failed to load'''
    failed = []
    for name, mod in _extensions.iteritems():
        if mod is None:
            failed.append(name)
    return failed
827 828
def moduleversion(module):
    '''return version information from given module as a string'''
    version = ''
    if (util.safehasattr(module, 'getversion')
        and callable(module.getversion)):
        version = module.getversion()
    elif util.safehasattr(module, '__version__'):
        version = module.__version__
    # some extensions expose a version tuple/list; join it with dots
    if isinstance(version, (list, tuple)):
        version = '.'.join(pycompat.bytestr(part) for part in version)
    return version
840 841
def ismoduleinternal(module):
    """Return True if the module declares testedwith == "ships-with-hg-core"."""
    return getattr(module, 'testedwith', None) == "ships-with-hg-core"
@@ -1,1229 +1,1225 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 narrowspec,
39 39 node,
40 40 phases,
41 41 scmutil,
42 42 sshpeer,
43 43 statichttprepo,
44 44 ui as uimod,
45 45 unionrepo,
46 46 url,
47 47 util,
48 48 verify as verifymod,
49 49 vfs as vfsmod,
50 50 )
51 51
52 52 release = lock.release
53 53
54 54 # shared features
55 55 sharedbookmarks = 'bookmarks'
56 56
57 57 def _local(path):
58 58 path = util.expandpath(util.urllocalpath(path))
59 59 return (os.path.isfile(path) and bundlerepo or localrepo)
60 60
61 61 def addbranchrevs(lrepo, other, branches, revs):
62 62 peer = other.peer() # a courtesy to callers using a localrepo for other
63 63 hashbranch, branches = branches
64 64 if not hashbranch and not branches:
65 65 x = revs or None
66 66 if revs:
67 67 y = revs[0]
68 68 else:
69 69 y = None
70 70 return x, y
71 71 if revs:
72 72 revs = list(revs)
73 73 else:
74 74 revs = []
75 75
76 76 if not peer.capable('branchmap'):
77 77 if branches:
78 78 raise error.Abort(_("remote branch lookup not supported"))
79 79 revs.append(hashbranch)
80 80 return revs, revs[0]
81 81
82 82 with peer.commandexecutor() as e:
83 83 branchmap = e.callcommand('branchmap', {}).result()
84 84
85 85 def primary(branch):
86 86 if branch == '.':
87 87 if not lrepo:
88 88 raise error.Abort(_("dirstate branch not accessible"))
89 89 branch = lrepo.dirstate.branch()
90 90 if branch in branchmap:
91 91 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
92 92 return True
93 93 else:
94 94 return False
95 95
96 96 for branch in branches:
97 97 if not primary(branch):
98 98 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
99 99 if hashbranch:
100 100 if not primary(hashbranch):
101 101 revs.append(hashbranch)
102 102 return revs, revs[0]
103 103
104 104 def parseurl(path, branches=None):
105 105 '''parse url#branch, returning (url, (branch, branches))'''
106 106
107 107 u = util.url(path)
108 108 branch = None
109 109 if u.fragment:
110 110 branch = u.fragment
111 111 u.fragment = None
112 112 return bytes(u), (branch, branches or [])
113 113
114 114 schemes = {
115 115 'bundle': bundlerepo,
116 116 'union': unionrepo,
117 117 'file': _local,
118 118 'http': httppeer,
119 119 'https': httppeer,
120 120 'ssh': sshpeer,
121 121 'static-http': statichttprepo,
122 122 }
123 123
124 124 def _peerlookup(path):
125 125 u = util.url(path)
126 126 scheme = u.scheme or 'file'
127 127 thing = schemes.get(scheme) or schemes['file']
128 128 try:
129 129 return thing(path)
130 130 except TypeError:
131 131 # we can't test callable(thing) because 'thing' can be an unloaded
132 132 # module that implements __call__
133 133 if not util.safehasattr(thing, 'instance'):
134 134 raise
135 135 return thing
136 136
137 137 def islocal(repo):
138 138 '''return true if repo (or path pointing to repo) is local'''
139 139 if isinstance(repo, bytes):
140 140 try:
141 141 return _peerlookup(repo).islocal(repo)
142 142 except AttributeError:
143 143 return False
144 144 return repo.local()
145 145
146 146 def openpath(ui, path):
147 147 '''open path with open if local, url.open if remote'''
148 148 pathurl = util.url(path, parsequery=False, parsefragment=False)
149 149 if pathurl.islocal():
150 150 return util.posixfile(pathurl.localpath(), 'rb')
151 151 else:
152 152 return url.open(ui, path)
153 153
154 154 # a list of (ui, repo) functions called for wire peer initialization
155 155 wirepeersetupfuncs = []
156 156
157 157 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
158 158 intents=None, createopts=None):
159 159 """return a repository object for the specified path"""
160 160 obj = _peerlookup(path).instance(ui, path, create, intents=intents,
161 161 createopts=createopts)
162 162 ui = getattr(obj, "ui", ui)
163 if ui.configbool('devel', 'debug.extensions'):
164 log = lambda msg, *values: ui.debug('debug.extensions: ',
165 msg % values, label='debug.extensions')
166 else:
167 log = lambda *a, **kw: None
168 163 for f in presetupfuncs or []:
169 164 f(ui, obj)
170 log('- executing reposetup hooks\n')
165 ui.log(b'extension', b'- executing reposetup hooks\n')
171 166 with util.timedcm('all reposetup') as allreposetupstats:
172 167 for name, module in extensions.extensions(ui):
173 log(' - running reposetup for %s\n' % (name,))
168 ui.log(b'extension', b' - running reposetup for %s\n', name)
174 169 hook = getattr(module, 'reposetup', None)
175 170 if hook:
176 171 with util.timedcm('reposetup %r', name) as stats:
177 172 hook(ui, obj)
178 log(' > reposetup for %s took %s\n', name, stats)
179 log('> all reposetup took %s\n', allreposetupstats)
173 ui.log(b'extension', b' > reposetup for %s took %s\n',
174 name, stats)
175 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
180 176 if not obj.local():
181 177 for f in wirepeersetupfuncs:
182 178 f(ui, obj)
183 179 return obj
184 180
185 181 def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
186 182 createopts=None):
187 183 """return a repository object for the specified path"""
188 184 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
189 185 intents=intents, createopts=createopts)
190 186 repo = peer.local()
191 187 if not repo:
192 188 raise error.Abort(_("repository '%s' is not local") %
193 189 (path or peer.url()))
194 190 return repo.filtered('visible')
195 191
196 192 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
197 193 '''return a repository peer for the specified path'''
198 194 rui = remoteui(uiorrepo, opts)
199 195 return _peerorrepo(rui, path, create, intents=intents,
200 196 createopts=createopts).peer()
201 197
202 198 def defaultdest(source):
203 199 '''return default destination of clone if none is given
204 200
205 201 >>> defaultdest(b'foo')
206 202 'foo'
207 203 >>> defaultdest(b'/foo/bar')
208 204 'bar'
209 205 >>> defaultdest(b'/')
210 206 ''
211 207 >>> defaultdest(b'')
212 208 ''
213 209 >>> defaultdest(b'http://example.org/')
214 210 ''
215 211 >>> defaultdest(b'http://example.org/foo/')
216 212 'foo'
217 213 '''
218 214 path = util.url(source).path
219 215 if not path:
220 216 return ''
221 217 return os.path.basename(os.path.normpath(path))
222 218
223 219 def sharedreposource(repo):
224 220 """Returns repository object for source repository of a shared repo.
225 221
226 222 If repo is not a shared repository, returns None.
227 223 """
228 224 if repo.sharedpath == repo.path:
229 225 return None
230 226
231 227 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
232 228 return repo.srcrepo
233 229
234 230 # the sharedpath always ends in the .hg; we want the path to the repo
235 231 source = repo.vfs.split(repo.sharedpath)[0]
236 232 srcurl, branches = parseurl(source)
237 233 srcrepo = repository(repo.ui, srcurl)
238 234 repo.srcrepo = srcrepo
239 235 return srcrepo
240 236
241 237 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
242 238 relative=False):
243 239 '''create a shared repository'''
244 240
245 241 if not islocal(source):
246 242 raise error.Abort(_('can only share local repositories'))
247 243
248 244 if not dest:
249 245 dest = defaultdest(source)
250 246 else:
251 247 dest = ui.expandpath(dest)
252 248
253 249 if isinstance(source, bytes):
254 250 origsource = ui.expandpath(source)
255 251 source, branches = parseurl(origsource)
256 252 srcrepo = repository(ui, source)
257 253 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
258 254 else:
259 255 srcrepo = source.local()
260 256 checkout = None
261 257
262 258 shareditems = set()
263 259 if bookmarks:
264 260 shareditems.add(sharedbookmarks)
265 261
266 262 r = repository(ui, dest, create=True, createopts={
267 263 'sharedrepo': srcrepo,
268 264 'sharedrelative': relative,
269 265 'shareditems': shareditems,
270 266 })
271 267
272 268 postshare(srcrepo, r, defaultpath=defaultpath)
273 269 r = repository(ui, dest)
274 270 _postshareupdate(r, update, checkout=checkout)
275 271 return r
276 272
277 273 def unshare(ui, repo):
278 274 """convert a shared repository to a normal one
279 275
280 276 Copy the store data to the repo and remove the sharedpath data.
281 277
282 278 Returns a new repository object representing the unshared repository.
283 279
284 280 The passed repository object is not usable after this function is
285 281 called.
286 282 """
287 283
288 284 destlock = lock = None
289 285 lock = repo.lock()
290 286 try:
291 287 # we use locks here because if we race with commit, we
292 288 # can end up with extra data in the cloned revlogs that's
293 289 # not pointed to by changesets, thus causing verify to
294 290 # fail
295 291
296 292 destlock = copystore(ui, repo, repo.path)
297 293
298 294 sharefile = repo.vfs.join('sharedpath')
299 295 util.rename(sharefile, sharefile + '.old')
300 296
301 297 repo.requirements.discard('shared')
302 298 repo.requirements.discard('relshared')
303 299 repo._writerequirements()
304 300 finally:
305 301 destlock and destlock.release()
306 302 lock and lock.release()
307 303
308 304 # Removing share changes some fundamental properties of the repo instance.
309 305 # So we instantiate a new repo object and operate on it rather than
310 306 # try to keep the existing repo usable.
311 307 newrepo = repository(repo.baseui, repo.root, create=False)
312 308
313 309 # TODO: figure out how to access subrepos that exist, but were previously
314 310 # removed from .hgsub
315 311 c = newrepo['.']
316 312 subs = c.substate
317 313 for s in sorted(subs):
318 314 c.sub(s).unshare()
319 315
320 316 localrepo.poisonrepository(repo)
321 317
322 318 return newrepo
323 319
324 320 def postshare(sourcerepo, destrepo, defaultpath=None):
325 321 """Called after a new shared repo is created.
326 322
327 323 The new repo only has a requirements file and pointer to the source.
328 324 This function configures additional shared data.
329 325
330 326 Extensions can wrap this function and write additional entries to
331 327 destrepo/.hg/shared to indicate additional pieces of data to be shared.
332 328 """
333 329 default = defaultpath or sourcerepo.ui.config('paths', 'default')
334 330 if default:
335 331 template = ('[paths]\n'
336 332 'default = %s\n')
337 333 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
338 334
339 335 def _postshareupdate(repo, update, checkout=None):
340 336 """Maybe perform a working directory update after a shared repo is created.
341 337
342 338 ``update`` can be a boolean or a revision to update to.
343 339 """
344 340 if not update:
345 341 return
346 342
347 343 repo.ui.status(_("updating working directory\n"))
348 344 if update is not True:
349 345 checkout = update
350 346 for test in (checkout, 'default', 'tip'):
351 347 if test is None:
352 348 continue
353 349 try:
354 350 uprev = repo.lookup(test)
355 351 break
356 352 except error.RepoLookupError:
357 353 continue
358 354 _update(repo, uprev)
359 355
360 356 def copystore(ui, srcrepo, destpath):
361 357 '''copy files from store of srcrepo in destpath
362 358
363 359 returns destlock
364 360 '''
365 361 destlock = None
366 362 try:
367 363 hardlink = None
368 364 topic = _('linking') if hardlink else _('copying')
369 365 with ui.makeprogress(topic, unit=_('files')) as progress:
370 366 num = 0
371 367 srcpublishing = srcrepo.publishing()
372 368 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
373 369 dstvfs = vfsmod.vfs(destpath)
374 370 for f in srcrepo.store.copylist():
375 371 if srcpublishing and f.endswith('phaseroots'):
376 372 continue
377 373 dstbase = os.path.dirname(f)
378 374 if dstbase and not dstvfs.exists(dstbase):
379 375 dstvfs.mkdir(dstbase)
380 376 if srcvfs.exists(f):
381 377 if f.endswith('data'):
382 378 # 'dstbase' may be empty (e.g. revlog format 0)
383 379 lockfile = os.path.join(dstbase, "lock")
384 380 # lock to avoid premature writing to the target
385 381 destlock = lock.lock(dstvfs, lockfile)
386 382 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
387 383 hardlink, progress)
388 384 num += n
389 385 if hardlink:
390 386 ui.debug("linked %d files\n" % num)
391 387 else:
392 388 ui.debug("copied %d files\n" % num)
393 389 return destlock
394 390 except: # re-raises
395 391 release(destlock)
396 392 raise
397 393
398 394 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
399 395 rev=None, update=True, stream=False):
400 396 """Perform a clone using a shared repo.
401 397
402 398 The store for the repository will be located at <sharepath>/.hg. The
403 399 specified revisions will be cloned or pulled from "source". A shared repo
404 400 will be created at "dest" and a working copy will be created if "update" is
405 401 True.
406 402 """
407 403 revs = None
408 404 if rev:
409 405 if not srcpeer.capable('lookup'):
410 406 raise error.Abort(_("src repository does not support "
411 407 "revision lookup and so doesn't "
412 408 "support clone by revision"))
413 409
414 410 # TODO this is batchable.
415 411 remoterevs = []
416 412 for r in rev:
417 413 with srcpeer.commandexecutor() as e:
418 414 remoterevs.append(e.callcommand('lookup', {
419 415 'key': r,
420 416 }).result())
421 417 revs = remoterevs
422 418
423 419 # Obtain a lock before checking for or cloning the pooled repo otherwise
424 420 # 2 clients may race creating or populating it.
425 421 pooldir = os.path.dirname(sharepath)
426 422 # lock class requires the directory to exist.
427 423 try:
428 424 util.makedir(pooldir, False)
429 425 except OSError as e:
430 426 if e.errno != errno.EEXIST:
431 427 raise
432 428
433 429 poolvfs = vfsmod.vfs(pooldir)
434 430 basename = os.path.basename(sharepath)
435 431
436 432 with lock.lock(poolvfs, '%s.lock' % basename):
437 433 if os.path.exists(sharepath):
438 434 ui.status(_('(sharing from existing pooled repository %s)\n') %
439 435 basename)
440 436 else:
441 437 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
442 438 # Always use pull mode because hardlinks in share mode don't work
443 439 # well. Never update because working copies aren't necessary in
444 440 # share mode.
445 441 clone(ui, peeropts, source, dest=sharepath, pull=True,
446 442 revs=rev, update=False, stream=stream)
447 443
448 444 # Resolve the value to put in [paths] section for the source.
449 445 if islocal(source):
450 446 defaultpath = os.path.abspath(util.urllocalpath(source))
451 447 else:
452 448 defaultpath = source
453 449
454 450 sharerepo = repository(ui, path=sharepath)
455 451 destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
456 452 defaultpath=defaultpath)
457 453
458 454 # We need to perform a pull against the dest repo to fetch bookmarks
459 455 # and other non-store data that isn't shared by default. In the case of
460 456 # non-existing shared repo, this means we pull from the remote twice. This
461 457 # is a bit weird. But at the time it was implemented, there wasn't an easy
462 458 # way to pull just non-changegroup data.
463 459 exchange.pull(destrepo, srcpeer, heads=revs)
464 460
465 461 _postshareupdate(destrepo, update)
466 462
467 463 return srcpeer, peer(ui, peeropts, dest)
468 464
469 465 # Recomputing branch cache might be slow on big repos,
470 466 # so just copy it
471 467 def _copycache(srcrepo, dstcachedir, fname):
472 468 """copy a cache from srcrepo to destcachedir (if it exists)"""
473 469 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
474 470 dstbranchcache = os.path.join(dstcachedir, fname)
475 471 if os.path.exists(srcbranchcache):
476 472 if not os.path.exists(dstcachedir):
477 473 os.mkdir(dstcachedir)
478 474 util.copyfile(srcbranchcache, dstbranchcache)
479 475
480 476 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
481 477 update=True, stream=False, branch=None, shareopts=None,
482 478 storeincludepats=None, storeexcludepats=None, depth=None):
483 479 """Make a copy of an existing repository.
484 480
485 481 Create a copy of an existing repository in a new directory. The
486 482 source and destination are URLs, as passed to the repository
487 483 function. Returns a pair of repository peers, the source and
488 484 newly created destination.
489 485
490 486 The location of the source is added to the new repository's
491 487 .hg/hgrc file, as the default to be used for future pulls and
492 488 pushes.
493 489
494 490 If an exception is raised, the partly cloned/updated destination
495 491 repository will be deleted.
496 492
497 493 Arguments:
498 494
499 495 source: repository object or URL
500 496
501 497 dest: URL of destination repository to create (defaults to base
502 498 name of source repository)
503 499
504 500 pull: always pull from source repository, even in local case or if the
505 501 server prefers streaming
506 502
507 503 stream: stream raw data uncompressed from repository (fast over
508 504 LAN, slow over WAN)
509 505
510 506 revs: revision to clone up to (implies pull=True)
511 507
512 508 update: update working directory after clone completes, if
513 509 destination is local repository (True means update to default rev,
514 510 anything else is treated as a revision)
515 511
516 512 branch: branches to clone
517 513
518 514 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 515 activates auto sharing mode and defines the directory for stores. The
520 516 "mode" key determines how to construct the directory name of the shared
521 517 repository. "identity" means the name is derived from the node of the first
522 518 changeset in the repository. "remote" means the name is derived from the
523 519 remote's path/URL. Defaults to "identity."
524 520
525 521 storeincludepats and storeexcludepats: sets of file patterns to include and
526 522 exclude in the repository copy, respectively. If not defined, all files
527 523 will be included (a "full" clone). Otherwise a "narrow" clone containing
528 524 only the requested files will be performed. If ``storeincludepats`` is not
529 525 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
530 526 ``path:.``. If both are empty sets, no files will be cloned.
531 527 """
532 528
533 529 if isinstance(source, bytes):
534 530 origsource = ui.expandpath(source)
535 531 source, branches = parseurl(origsource, branch)
536 532 srcpeer = peer(ui, peeropts, source)
537 533 else:
538 534 srcpeer = source.peer() # in case we were called with a localrepo
539 535 branches = (None, branch or [])
540 536 origsource = source = srcpeer.url()
541 537 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
542 538
543 539 if dest is None:
544 540 dest = defaultdest(source)
545 541 if dest:
546 542 ui.status(_("destination directory: %s\n") % dest)
547 543 else:
548 544 dest = ui.expandpath(dest)
549 545
550 546 dest = util.urllocalpath(dest)
551 547 source = util.urllocalpath(source)
552 548
553 549 if not dest:
554 550 raise error.Abort(_("empty destination path is not valid"))
555 551
556 552 destvfs = vfsmod.vfs(dest, expandpath=True)
557 553 if destvfs.lexists():
558 554 if not destvfs.isdir():
559 555 raise error.Abort(_("destination '%s' already exists") % dest)
560 556 elif destvfs.listdir():
561 557 raise error.Abort(_("destination '%s' is not empty") % dest)
562 558
563 559 createopts = {}
564 560 narrow = False
565 561
566 562 if storeincludepats is not None:
567 563 narrowspec.validatepatterns(storeincludepats)
568 564 narrow = True
569 565
570 566 if storeexcludepats is not None:
571 567 narrowspec.validatepatterns(storeexcludepats)
572 568 narrow = True
573 569
574 570 if narrow:
575 571 # Include everything by default if only exclusion patterns defined.
576 572 if storeexcludepats and not storeincludepats:
577 573 storeincludepats = {'path:.'}
578 574
579 575 createopts['narrowfiles'] = True
580 576
581 577 if depth:
582 578 createopts['shallowfilestore'] = True
583 579
584 580 if srcpeer.capable(b'lfs-serve'):
585 581 # Repository creation honors the config if it disabled the extension, so
586 582 # we can't just announce that lfs will be enabled. This check avoids
587 583 # saying that lfs will be enabled, and then saying it's an unknown
588 584 # feature. The lfs creation option is set in either case so that a
589 585 # requirement is added. If the extension is explicitly disabled but the
590 586 # requirement is set, the clone aborts early, before transferring any
591 587 # data.
592 588 createopts['lfs'] = True
593 589
594 590 if extensions.disabledext('lfs'):
595 591 ui.status(_('(remote is using large file support (lfs), but it is '
596 592 'explicitly disabled in the local configuration)\n'))
597 593 else:
598 594 ui.status(_('(remote is using large file support (lfs); lfs will '
599 595 'be enabled for this repository)\n'))
600 596
601 597 shareopts = shareopts or {}
602 598 sharepool = shareopts.get('pool')
603 599 sharenamemode = shareopts.get('mode')
604 600 if sharepool and islocal(dest):
605 601 sharepath = None
606 602 if sharenamemode == 'identity':
607 603 # Resolve the name from the initial changeset in the remote
608 604 # repository. This returns nullid when the remote is empty. It
609 605 # raises RepoLookupError if revision 0 is filtered or otherwise
610 606 # not available. If we fail to resolve, sharing is not enabled.
611 607 try:
612 608 with srcpeer.commandexecutor() as e:
613 609 rootnode = e.callcommand('lookup', {
614 610 'key': '0',
615 611 }).result()
616 612
617 613 if rootnode != node.nullid:
618 614 sharepath = os.path.join(sharepool, node.hex(rootnode))
619 615 else:
620 616 ui.status(_('(not using pooled storage: '
621 617 'remote appears to be empty)\n'))
622 618 except error.RepoLookupError:
623 619 ui.status(_('(not using pooled storage: '
624 620 'unable to resolve identity of remote)\n'))
625 621 elif sharenamemode == 'remote':
626 622 sharepath = os.path.join(
627 623 sharepool, node.hex(hashlib.sha1(source).digest()))
628 624 else:
629 625 raise error.Abort(_('unknown share naming mode: %s') %
630 626 sharenamemode)
631 627
632 628 # TODO this is a somewhat arbitrary restriction.
633 629 if narrow:
634 630 ui.status(_('(pooled storage not supported for narrow clones)\n'))
635 631 sharepath = None
636 632
637 633 if sharepath:
638 634 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
639 635 dest, pull=pull, rev=revs, update=update,
640 636 stream=stream)
641 637
642 638 srclock = destlock = cleandir = None
643 639 srcrepo = srcpeer.local()
644 640 try:
645 641 abspath = origsource
646 642 if islocal(origsource):
647 643 abspath = os.path.abspath(util.urllocalpath(origsource))
648 644
649 645 if islocal(dest):
650 646 cleandir = dest
651 647
652 648 copy = False
653 649 if (srcrepo and srcrepo.cancopy() and islocal(dest)
654 650 and not phases.hassecret(srcrepo)):
655 651 copy = not pull and not revs
656 652
657 653 # TODO this is a somewhat arbitrary restriction.
658 654 if narrow:
659 655 copy = False
660 656
661 657 if copy:
662 658 try:
663 659 # we use a lock here because if we race with commit, we
664 660 # can end up with extra data in the cloned revlogs that's
665 661 # not pointed to by changesets, thus causing verify to
666 662 # fail
667 663 srclock = srcrepo.lock(wait=False)
668 664 except error.LockError:
669 665 copy = False
670 666
671 667 if copy:
672 668 srcrepo.hook('preoutgoing', throw=True, source='clone')
673 669 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
674 670 if not os.path.exists(dest):
675 671 util.makedirs(dest)
676 672 else:
677 673 # only clean up directories we create ourselves
678 674 cleandir = hgdir
679 675 try:
680 676 destpath = hgdir
681 677 util.makedir(destpath, notindexed=True)
682 678 except OSError as inst:
683 679 if inst.errno == errno.EEXIST:
684 680 cleandir = None
685 681 raise error.Abort(_("destination '%s' already exists")
686 682 % dest)
687 683 raise
688 684
689 685 destlock = copystore(ui, srcrepo, destpath)
690 686 # copy bookmarks over
691 687 srcbookmarks = srcrepo.vfs.join('bookmarks')
692 688 dstbookmarks = os.path.join(destpath, 'bookmarks')
693 689 if os.path.exists(srcbookmarks):
694 690 util.copyfile(srcbookmarks, dstbookmarks)
695 691
696 692 dstcachedir = os.path.join(destpath, 'cache')
697 693 for cache in cacheutil.cachetocopy(srcrepo):
698 694 _copycache(srcrepo, dstcachedir, cache)
699 695
700 696 # we need to re-init the repo after manually copying the data
701 697 # into it
702 698 destpeer = peer(srcrepo, peeropts, dest)
703 699 srcrepo.hook('outgoing', source='clone',
704 700 node=node.hex(node.nullid))
705 701 else:
706 702 try:
707 703 # only pass ui when no srcrepo
708 704 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
709 705 createopts=createopts)
710 706 except OSError as inst:
711 707 if inst.errno == errno.EEXIST:
712 708 cleandir = None
713 709 raise error.Abort(_("destination '%s' already exists")
714 710 % dest)
715 711 raise
716 712
717 713 if revs:
718 714 if not srcpeer.capable('lookup'):
719 715 raise error.Abort(_("src repository does not support "
720 716 "revision lookup and so doesn't "
721 717 "support clone by revision"))
722 718
723 719 # TODO this is batchable.
724 720 remoterevs = []
725 721 for rev in revs:
726 722 with srcpeer.commandexecutor() as e:
727 723 remoterevs.append(e.callcommand('lookup', {
728 724 'key': rev,
729 725 }).result())
730 726 revs = remoterevs
731 727
732 728 checkout = revs[0]
733 729 else:
734 730 revs = None
735 731 local = destpeer.local()
736 732 if local:
737 733 if narrow:
738 734 with local.lock():
739 735 local.setnarrowpats(storeincludepats, storeexcludepats)
740 736
741 737 u = util.url(abspath)
742 738 defaulturl = bytes(u)
743 739 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
744 740 if not stream:
745 741 if pull:
746 742 stream = False
747 743 else:
748 744 stream = None
749 745 # internal config: ui.quietbookmarkmove
750 746 overrides = {('ui', 'quietbookmarkmove'): True}
751 747 with local.ui.configoverride(overrides, 'clone'):
752 748 exchange.pull(local, srcpeer, revs,
753 749 streamclonerequested=stream,
754 750 includepats=storeincludepats,
755 751 excludepats=storeexcludepats,
756 752 depth=depth)
757 753 elif srcrepo:
758 754 # TODO lift restriction once exchange.push() accepts narrow
759 755 # push.
760 756 if narrow:
761 757 raise error.Abort(_('narrow clone not available for '
762 758 'remote destinations'))
763 759
764 760 exchange.push(srcrepo, destpeer, revs=revs,
765 761 bookmarks=srcrepo._bookmarks.keys())
766 762 else:
767 763 raise error.Abort(_("clone from remote to remote not supported")
768 764 )
769 765
770 766 cleandir = None
771 767
772 768 destrepo = destpeer.local()
773 769 if destrepo:
774 770 template = uimod.samplehgrcs['cloned']
775 771 u = util.url(abspath)
776 772 u.passwd = None
777 773 defaulturl = bytes(u)
778 774 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
779 775 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
780 776
781 777 if ui.configbool('experimental', 'remotenames'):
782 778 logexchange.pullremotenames(destrepo, srcpeer)
783 779
784 780 if update:
785 781 if update is not True:
786 782 with srcpeer.commandexecutor() as e:
787 783 checkout = e.callcommand('lookup', {
788 784 'key': update,
789 785 }).result()
790 786
791 787 uprev = None
792 788 status = None
793 789 if checkout is not None:
794 790 # Some extensions (at least hg-git and hg-subversion) have
795 791 # a peer.lookup() implementation that returns a name instead
796 792 # of a nodeid. We work around it here until we've figured
797 793 # out a better solution.
798 794 if len(checkout) == 20 and checkout in destrepo:
799 795 uprev = checkout
800 796 elif scmutil.isrevsymbol(destrepo, checkout):
801 797 uprev = scmutil.revsymbol(destrepo, checkout).node()
802 798 else:
803 799 if update is not True:
804 800 try:
805 801 uprev = destrepo.lookup(update)
806 802 except error.RepoLookupError:
807 803 pass
808 804 if uprev is None:
809 805 try:
810 806 uprev = destrepo._bookmarks['@']
811 807 update = '@'
812 808 bn = destrepo[uprev].branch()
813 809 if bn == 'default':
814 810 status = _("updating to bookmark @\n")
815 811 else:
816 812 status = (_("updating to bookmark @ on branch %s\n")
817 813 % bn)
818 814 except KeyError:
819 815 try:
820 816 uprev = destrepo.branchtip('default')
821 817 except error.RepoLookupError:
822 818 uprev = destrepo.lookup('tip')
823 819 if not status:
824 820 bn = destrepo[uprev].branch()
825 821 status = _("updating to branch %s\n") % bn
826 822 destrepo.ui.status(status)
827 823 _update(destrepo, uprev)
828 824 if update in destrepo._bookmarks:
829 825 bookmarks.activate(destrepo, update)
830 826 finally:
831 827 release(srclock, destlock)
832 828 if cleandir is not None:
833 829 shutil.rmtree(cleandir, True)
834 830 if srcpeer is not None:
835 831 srcpeer.close()
836 832 return srcpeer, destpeer
837 833
838 834 def _showstats(repo, stats, quietempty=False):
839 835 if quietempty and stats.isempty():
840 836 return
841 837 repo.ui.status(_("%d files updated, %d files merged, "
842 838 "%d files removed, %d files unresolved\n") % (
843 839 stats.updatedcount, stats.mergedcount,
844 840 stats.removedcount, stats.unresolvedcount))
845 841
846 842 def updaterepo(repo, node, overwrite, updatecheck=None):
847 843 """Update the working directory to node.
848 844
849 845 When overwrite is set, changes are clobbered, merged else
850 846
851 847 returns stats (see pydoc mercurial.merge.applyupdates)"""
852 848 return mergemod.update(repo, node, branchmerge=False, force=overwrite,
853 849 labels=['working copy', 'destination'],
854 850 updatecheck=updatecheck)
855 851
856 852 def update(repo, node, quietempty=False, updatecheck=None):
857 853 """update the working directory to node"""
858 854 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
859 855 _showstats(repo, stats, quietempty)
860 856 if stats.unresolvedcount:
861 857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
862 858 return stats.unresolvedcount > 0
863 859
864 860 # naming conflict in clone()
865 861 _update = update
866 862
867 863 def clean(repo, node, show_stats=True, quietempty=False):
868 864 """forcibly switch the working directory to node, clobbering changes"""
869 865 stats = updaterepo(repo, node, True)
870 866 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
871 867 if show_stats:
872 868 _showstats(repo, stats, quietempty)
873 869 return stats.unresolvedcount > 0
874 870
875 871 # naming conflict in updatetotally()
876 872 _clean = clean
877 873
878 874 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
879 875 """Update the working directory with extra care for non-file components
880 876
881 877 This takes care of non-file components below:
882 878
883 879 :bookmark: might be advanced or (in)activated
884 880
885 881 This takes arguments below:
886 882
887 883 :checkout: to which revision the working directory is updated
888 884 :brev: a name, which might be a bookmark to be activated after updating
889 885 :clean: whether changes in the working directory can be discarded
890 886 :updatecheck: how to deal with a dirty working directory
891 887
892 888 Valid values for updatecheck are (None => linear):
893 889
894 890 * abort: abort if the working directory is dirty
895 891 * none: don't check (merge working directory changes into destination)
896 892 * linear: check that update is linear before merging working directory
897 893 changes into destination
898 894 * noconflict: check that the update does not result in file merges
899 895
900 896 This returns whether conflict is detected at updating or not.
901 897 """
902 898 if updatecheck is None:
903 899 updatecheck = ui.config('commands', 'update.check')
904 900 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
905 901 # If not configured, or invalid value configured
906 902 updatecheck = 'linear'
907 903 with repo.wlock():
908 904 movemarkfrom = None
909 905 warndest = False
910 906 if checkout is None:
911 907 updata = destutil.destupdate(repo, clean=clean)
912 908 checkout, movemarkfrom, brev = updata
913 909 warndest = True
914 910
915 911 if clean:
916 912 ret = _clean(repo, checkout)
917 913 else:
918 914 if updatecheck == 'abort':
919 915 cmdutil.bailifchanged(repo, merge=False)
920 916 updatecheck = 'none'
921 917 ret = _update(repo, checkout, updatecheck=updatecheck)
922 918
923 919 if not ret and movemarkfrom:
924 920 if movemarkfrom == repo['.'].node():
925 921 pass # no-op update
926 922 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
927 923 b = ui.label(repo._activebookmark, 'bookmarks.active')
928 924 ui.status(_("updating bookmark %s\n") % b)
929 925 else:
930 926 # this can happen with a non-linear update
931 927 b = ui.label(repo._activebookmark, 'bookmarks')
932 928 ui.status(_("(leaving bookmark %s)\n") % b)
933 929 bookmarks.deactivate(repo)
934 930 elif brev in repo._bookmarks:
935 931 if brev != repo._activebookmark:
936 932 b = ui.label(brev, 'bookmarks.active')
937 933 ui.status(_("(activating bookmark %s)\n") % b)
938 934 bookmarks.activate(repo, brev)
939 935 elif brev:
940 936 if repo._activebookmark:
941 937 b = ui.label(repo._activebookmark, 'bookmarks')
942 938 ui.status(_("(leaving bookmark %s)\n") % b)
943 939 bookmarks.deactivate(repo)
944 940
945 941 if warndest:
946 942 destutil.statusotherdests(ui, repo)
947 943
948 944 return ret
949 945
950 946 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
951 947 abort=False):
952 948 """Branch merge with node, resolving changes. Return true if any
953 949 unresolved conflicts."""
954 950 if not abort:
955 951 stats = mergemod.update(repo, node, branchmerge=True, force=force,
956 952 mergeforce=mergeforce, labels=labels)
957 953 else:
958 954 ms = mergemod.mergestate.read(repo)
959 955 if ms.active():
960 956 # there were conflicts
961 957 node = ms.localctx.hex()
962 958 else:
963 959 # there were no conficts, mergestate was not stored
964 960 node = repo['.'].hex()
965 961
966 962 repo.ui.status(_("aborting the merge, updating back to"
967 963 " %s\n") % node[:12])
968 964 stats = mergemod.update(repo, node, branchmerge=False, force=True,
969 965 labels=labels)
970 966
971 967 _showstats(repo, stats)
972 968 if stats.unresolvedcount:
973 969 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
974 970 "or 'hg merge --abort' to abandon\n"))
975 971 elif remind and not abort:
976 972 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
977 973 return stats.unresolvedcount > 0
978 974
979 975 def _incoming(displaychlist, subreporecurse, ui, repo, source,
980 976 opts, buffered=False):
981 977 """
982 978 Helper for incoming / gincoming.
983 979 displaychlist gets called with
984 980 (remoterepo, incomingchangesetlist, displayer) parameters,
985 981 and is supposed to contain only code that can't be unified.
986 982 """
987 983 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
988 984 other = peer(repo, opts, source)
989 985 ui.status(_('comparing with %s\n') % util.hidepassword(source))
990 986 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
991 987
992 988 if revs:
993 989 revs = [other.lookup(rev) for rev in revs]
994 990 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
995 991 revs, opts["bundle"], opts["force"])
996 992 try:
997 993 if not chlist:
998 994 ui.status(_("no changes found\n"))
999 995 return subreporecurse()
1000 996 ui.pager('incoming')
1001 997 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
1002 998 buffered=buffered)
1003 999 displaychlist(other, chlist, displayer)
1004 1000 displayer.close()
1005 1001 finally:
1006 1002 cleanupfn()
1007 1003 subreporecurse()
1008 1004 return 0 # exit code is zero since we found incoming changes
1009 1005
1010 1006 def incoming(ui, repo, source, opts):
1011 1007 def subreporecurse():
1012 1008 ret = 1
1013 1009 if opts.get('subrepos'):
1014 1010 ctx = repo[None]
1015 1011 for subpath in sorted(ctx.substate):
1016 1012 sub = ctx.sub(subpath)
1017 1013 ret = min(ret, sub.incoming(ui, source, opts))
1018 1014 return ret
1019 1015
1020 1016 def display(other, chlist, displayer):
1021 1017 limit = logcmdutil.getlimit(opts)
1022 1018 if opts.get('newest_first'):
1023 1019 chlist.reverse()
1024 1020 count = 0
1025 1021 for n in chlist:
1026 1022 if limit is not None and count >= limit:
1027 1023 break
1028 1024 parents = [p for p in other.changelog.parents(n) if p != nullid]
1029 1025 if opts.get('no_merges') and len(parents) == 2:
1030 1026 continue
1031 1027 count += 1
1032 1028 displayer.show(other[n])
1033 1029 return _incoming(display, subreporecurse, ui, repo, source, opts)
1034 1030
1035 1031 def _outgoing(ui, repo, dest, opts):
1036 1032 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1037 1033 if not path:
1038 1034 raise error.Abort(_('default repository not configured!'),
1039 1035 hint=_("see 'hg help config.paths'"))
1040 1036 dest = path.pushloc or path.loc
1041 1037 branches = path.branch, opts.get('branch') or []
1042 1038
1043 1039 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1044 1040 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1045 1041 if revs:
1046 1042 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1047 1043
1048 1044 other = peer(repo, opts, dest)
1049 1045 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1050 1046 force=opts.get('force'))
1051 1047 o = outgoing.missing
1052 1048 if not o:
1053 1049 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1054 1050 return o, other
1055 1051
1056 1052 def outgoing(ui, repo, dest, opts):
1057 1053 def recurse():
1058 1054 ret = 1
1059 1055 if opts.get('subrepos'):
1060 1056 ctx = repo[None]
1061 1057 for subpath in sorted(ctx.substate):
1062 1058 sub = ctx.sub(subpath)
1063 1059 ret = min(ret, sub.outgoing(ui, dest, opts))
1064 1060 return ret
1065 1061
1066 1062 limit = logcmdutil.getlimit(opts)
1067 1063 o, other = _outgoing(ui, repo, dest, opts)
1068 1064 if not o:
1069 1065 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1070 1066 return recurse()
1071 1067
1072 1068 if opts.get('newest_first'):
1073 1069 o.reverse()
1074 1070 ui.pager('outgoing')
1075 1071 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1076 1072 count = 0
1077 1073 for n in o:
1078 1074 if limit is not None and count >= limit:
1079 1075 break
1080 1076 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1081 1077 if opts.get('no_merges') and len(parents) == 2:
1082 1078 continue
1083 1079 count += 1
1084 1080 displayer.show(repo[n])
1085 1081 displayer.close()
1086 1082 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1087 1083 recurse()
1088 1084 return 0 # exit code is zero since we found outgoing changes
1089 1085
1090 1086 def verify(repo):
1091 1087 """verify the consistency of a repository"""
1092 1088 ret = verifymod.verify(repo)
1093 1089
1094 1090 # Broken subrepo references in hidden csets don't seem worth worrying about,
1095 1091 # since they can't be pushed/pulled, and --hidden can be used if they are a
1096 1092 # concern.
1097 1093
1098 1094 # pathto() is needed for -R case
1099 1095 revs = repo.revs("filelog(%s)",
1100 1096 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1101 1097
1102 1098 if revs:
1103 1099 repo.ui.status(_('checking subrepo links\n'))
1104 1100 for rev in revs:
1105 1101 ctx = repo[rev]
1106 1102 try:
1107 1103 for subpath in ctx.substate:
1108 1104 try:
1109 1105 ret = (ctx.sub(subpath, allowcreate=False).verify()
1110 1106 or ret)
1111 1107 except error.RepoError as e:
1112 1108 repo.ui.warn(('%d: %s\n') % (rev, e))
1113 1109 except Exception:
1114 1110 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1115 1111 node.short(ctx.node()))
1116 1112
1117 1113 return ret
1118 1114
1119 1115 def remoteui(src, opts):
1120 1116 'build a remote ui from ui or repo and opts'
1121 1117 if util.safehasattr(src, 'baseui'): # looks like a repository
1122 1118 dst = src.baseui.copy() # drop repo-specific config
1123 1119 src = src.ui # copy target options from repo
1124 1120 else: # assume it's a global ui object
1125 1121 dst = src.copy() # keep all global options
1126 1122
1127 1123 # copy ssh-specific options
1128 1124 for o in 'ssh', 'remotecmd':
1129 1125 v = opts.get(o) or src.config('ui', o)
1130 1126 if v:
1131 1127 dst.setconfig("ui", o, v, 'copied')
1132 1128
1133 1129 # copy bundle-specific options
1134 1130 r = src.config('bundle', 'mainreporoot')
1135 1131 if r:
1136 1132 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1137 1133
1138 1134 # copy selected local settings to the remote ui
1139 1135 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1140 1136 for key, val in src.configitems(sect):
1141 1137 dst.setconfig(sect, key, val, 'copied')
1142 1138 v = src.config('web', 'cacerts')
1143 1139 if v:
1144 1140 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1145 1141
1146 1142 return dst
1147 1143
1148 1144 # Files of interest
1149 1145 # Used to check if the repository has changed looking at mtime and size of
1150 1146 # these files.
1151 1147 foi = [('spath', '00changelog.i'),
1152 1148 ('spath', 'phaseroots'), # ! phase can change content at the same size
1153 1149 ('spath', 'obsstore'),
1154 1150 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1155 1151 ]
1156 1152
1157 1153 class cachedlocalrepo(object):
1158 1154 """Holds a localrepository that can be cached and reused."""
1159 1155
1160 1156 def __init__(self, repo):
1161 1157 """Create a new cached repo from an existing repo.
1162 1158
1163 1159 We assume the passed in repo was recently created. If the
1164 1160 repo has changed between when it was created and when it was
1165 1161 turned into a cache, it may not refresh properly.
1166 1162 """
1167 1163 assert isinstance(repo, localrepo.localrepository)
1168 1164 self._repo = repo
1169 1165 self._state, self.mtime = self._repostate()
1170 1166 self._filtername = repo.filtername
1171 1167
1172 1168 def fetch(self):
1173 1169 """Refresh (if necessary) and return a repository.
1174 1170
1175 1171 If the cached instance is out of date, it will be recreated
1176 1172 automatically and returned.
1177 1173
1178 1174 Returns a tuple of the repo and a boolean indicating whether a new
1179 1175 repo instance was created.
1180 1176 """
1181 1177 # We compare the mtimes and sizes of some well-known files to
1182 1178 # determine if the repo changed. This is not precise, as mtimes
1183 1179 # are susceptible to clock skew and imprecise filesystems and
1184 1180 # file content can change while maintaining the same size.
1185 1181
1186 1182 state, mtime = self._repostate()
1187 1183 if state == self._state:
1188 1184 return self._repo, False
1189 1185
1190 1186 repo = repository(self._repo.baseui, self._repo.url())
1191 1187 if self._filtername:
1192 1188 self._repo = repo.filtered(self._filtername)
1193 1189 else:
1194 1190 self._repo = repo.unfiltered()
1195 1191 self._state = state
1196 1192 self.mtime = mtime
1197 1193
1198 1194 return self._repo, True
1199 1195
1200 1196 def _repostate(self):
1201 1197 state = []
1202 1198 maxmtime = -1
1203 1199 for attr, fname in foi:
1204 1200 prefix = getattr(self._repo, attr)
1205 1201 p = os.path.join(prefix, fname)
1206 1202 try:
1207 1203 st = os.stat(p)
1208 1204 except OSError:
1209 1205 st = os.stat(prefix)
1210 1206 state.append((st[stat.ST_MTIME], st.st_size))
1211 1207 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1212 1208
1213 1209 return tuple(state), maxmtime
1214 1210
1215 1211 def copy(self):
1216 1212 """Obtain a copy of this class instance.
1217 1213
1218 1214 A new localrepository instance is obtained. The new instance should be
1219 1215 completely independent of the original.
1220 1216 """
1221 1217 repo = repository(self._repo.baseui, self._repo.origroot)
1222 1218 if self._filtername:
1223 1219 repo = repo.filtered(self._filtername)
1224 1220 else:
1225 1221 repo = repo.unfiltered()
1226 1222 c = cachedlocalrepo(repo)
1227 1223 c._state = self._state
1228 1224 c.mtime = self.mtime
1229 1225 return c
@@ -1,151 +1,156 b''
1 $ filterlog () {
2 > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!'
3 > }
4
1 5 ensure that failing ui.atexit handlers report sensibly
2 6
3 7 $ cat > $TESTTMP/bailatexit.py <<EOF
4 8 > from mercurial import util
5 9 > def bail():
6 10 > raise RuntimeError('ui.atexit handler exception')
7 11 >
8 12 > def extsetup(ui):
9 13 > ui.atexit(bail)
10 14 > EOF
11 15 $ hg -q --config extensions.bailatexit=$TESTTMP/bailatexit.py \
12 16 > help help
13 17 hg help [-eck] [-s PLATFORM] [TOPIC]
14 18
15 19 show help for a given topic or a help overview
16 20 error in exit handlers:
17 21 Traceback (most recent call last):
18 22 File "*/mercurial/dispatch.py", line *, in _runexithandlers (glob)
19 23 func(*args, **kwargs)
20 24 File "$TESTTMP/bailatexit.py", line *, in bail (glob)
21 25 raise RuntimeError('ui.atexit handler exception')
22 26 RuntimeError: ui.atexit handler exception
23 27 [255]
24 28
25 29 $ rm $TESTTMP/bailatexit.py
26 30
27 31 another bad extension
28 32
29 33 $ echo 'raise Exception("bit bucket overflow")' > badext.py
30 34 $ abspathexc=`pwd`/badext.py
31 35
32 36 $ cat >baddocext.py <<EOF
33 37 > """
34 38 > baddocext is bad
35 39 > """
36 40 > EOF
37 41 $ abspathdoc=`pwd`/baddocext.py
38 42
39 43 $ cat <<EOF >> $HGRCPATH
40 44 > [extensions]
41 45 > gpg =
42 46 > hgext.gpg =
43 47 > badext = $abspathexc
44 48 > baddocext = $abspathdoc
45 49 > badext2 =
46 50 > EOF
47 51
48 52 $ hg -q help help 2>&1 |grep extension
49 53 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
50 54 *** failed to import extension badext2: No module named *badext2* (glob)
51 55
52 56 show traceback
53 57
54 58 $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError|ModuleNotFound'
55 59 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
56 60 Traceback (most recent call last):
57 61 Exception: bit bucket overflow
58 62 *** failed to import extension badext2: No module named *badext2* (glob)
59 63 Traceback (most recent call last):
60 64 ImportError: No module named badext2 (no-py3 !)
61 65 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
62 66 Traceback (most recent call last): (py3 !)
63 67 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
64 68 Traceback (most recent call last): (py3 !)
65 69 ModuleNotFoundError: No module named 'badext2' (py3 !)
66 70
67 71 names of extensions failed to load can be accessed via extensions.notloaded()
68 72
69 73 $ cat <<EOF > showbadexts.py
70 74 > from mercurial import commands, extensions, registrar
71 75 > cmdtable = {}
72 76 > command = registrar.command(cmdtable)
73 77 > @command(b'showbadexts', norepo=True)
74 78 > def showbadexts(ui, *pats, **opts):
75 79 > ui.write(b'BADEXTS: %s\n' % b' '.join(sorted(extensions.notloaded())))
76 80 > EOF
77 81 $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
78 82 BADEXTS: badext badext2
79 83
80 84 #if no-extraextensions
81 85 show traceback for ImportError of hgext.name if devel.debug.extensions is set
82 86
83 87 $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
84 88 > | grep -v '^ ' \
85 > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import|ModuleNotFound'
86 debug.extensions: loading extensions
87 debug.extensions: - processing 5 entries
88 debug.extensions: - loading extension: gpg
89 debug.extensions: > gpg extension loaded in * (glob)
90 debug.extensions: - validating extension tables: gpg
91 debug.extensions: - invoking registered callbacks: gpg
92 debug.extensions: > callbacks completed in * (glob)
93 debug.extensions: - loading extension: badext
89 > | filterlog \
90 > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|^YYYY|not import|ModuleNotFound'
91 YYYY/MM/DD HH:MM:SS (PID)> loading extensions
92 YYYY/MM/DD HH:MM:SS (PID)> - processing 5 entries
93 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: gpg
94 YYYY/MM/DD HH:MM:SS (PID)> > gpg extension loaded in * (glob)
95 YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: gpg
96 YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: gpg
97 YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
98 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext
94 99 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
95 100 Traceback (most recent call last):
96 101 Exception: bit bucket overflow
97 debug.extensions: - loading extension: baddocext
98 debug.extensions: > baddocext extension loaded in * (glob)
99 debug.extensions: - validating extension tables: baddocext
100 debug.extensions: - invoking registered callbacks: baddocext
101 debug.extensions: > callbacks completed in * (glob)
102 debug.extensions: - loading extension: badext2
103 debug.extensions: - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
102 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: baddocext
103 YYYY/MM/DD HH:MM:SS (PID)> > baddocext extension loaded in * (glob)
104 YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: baddocext
105 YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: baddocext
106 YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
107 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext2
108 YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
104 109 Traceback (most recent call last):
105 110 ImportError: No module named badext2 (no-py3 !)
106 111 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
107 debug.extensions: - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
112 YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
108 113 Traceback (most recent call last):
109 114 ImportError: No module named badext2 (no-py3 !)
110 115 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
111 116 Traceback (most recent call last): (py3 !)
112 117 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
113 118 *** failed to import extension badext2: No module named *badext2* (glob)
114 119 Traceback (most recent call last):
115 120 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
116 121 Traceback (most recent call last): (py3 !)
117 122 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
118 123 Traceback (most recent call last): (py3 !)
119 124 ModuleNotFoundError: No module named 'badext2' (py3 !)
120 125 ImportError: No module named badext2 (no-py3 !)
121 debug.extensions: > loaded 2 extensions, total time * (glob)
122 debug.extensions: - loading configtable attributes
123 debug.extensions: - executing uisetup hooks
124 debug.extensions: - running uisetup for gpg
125 debug.extensions: > uisetup for gpg took * (glob)
126 debug.extensions: - running uisetup for baddocext
127 debug.extensions: > uisetup for baddocext took * (glob)
128 debug.extensions: > all uisetup took * (glob)
129 debug.extensions: - executing extsetup hooks
130 debug.extensions: - running extsetup for gpg
131 debug.extensions: > extsetup for gpg took * (glob)
132 debug.extensions: - running extsetup for baddocext
133 debug.extensions: > extsetup for baddocext took * (glob)
134 debug.extensions: > all extsetup took * (glob)
135 debug.extensions: - executing remaining aftercallbacks
136 debug.extensions: > remaining aftercallbacks completed in * (glob)
137 debug.extensions: - loading extension registration objects
138 debug.extensions: > extension registration object loading took * (glob)
139 debug.extensions: > extension baddocext take a total of * to load (glob)
140 debug.extensions: > extension gpg take a total of * to load (glob)
141 debug.extensions: extension loading complete
126 YYYY/MM/DD HH:MM:SS (PID)> > loaded 2 extensions, total time * (glob)
127 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
128 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
129 YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for gpg
130 YYYY/MM/DD HH:MM:SS (PID)> > uisetup for gpg took * (glob)
131 YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for baddocext
132 YYYY/MM/DD HH:MM:SS (PID)> > uisetup for baddocext took * (glob)
133 YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
134 YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
135 YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for gpg
136 YYYY/MM/DD HH:MM:SS (PID)> > extsetup for gpg took * (glob)
137 YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for baddocext
138 YYYY/MM/DD HH:MM:SS (PID)> > extsetup for baddocext took * (glob)
139 YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
140 YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
141 YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
142 YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
143 YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
144 YYYY/MM/DD HH:MM:SS (PID)> > extension baddocext take a total of * to load (glob)
145 YYYY/MM/DD HH:MM:SS (PID)> > extension gpg take a total of * to load (glob)
146 YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
142 147 #endif
143 148
144 149 confirm that there's no crash when an extension's documentation is bad
145 150
146 151 $ hg help --keyword baddocext
147 152 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
148 153 *** failed to import extension badext2: No module named *badext2* (glob)
149 154 Topics:
150 155
151 156 extensions Using Additional Features
@@ -1,96 +1,100 b''
1 1 Test basic extension support
2 2
3 3 $ cat > foobar.py <<EOF
4 4 > import os
5 5 > from mercurial import commands, registrar
6 6 > cmdtable = {}
7 7 > command = registrar.command(cmdtable)
8 8 > configtable = {}
9 9 > configitem = registrar.configitem(configtable)
10 10 > configitem(b'tests', b'foo', default=b"Foo")
11 11 > def uisetup(ui):
12 12 > ui.debug(b"uisetup called [debug]\\n")
13 13 > ui.write(b"uisetup called\\n")
14 14 > ui.status(b"uisetup called [status]\\n")
15 15 > ui.flush()
16 16 > def reposetup(ui, repo):
17 17 > ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root))
18 18 > ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!"))
19 19 > ui.flush()
20 20 > @command(b'foo', [], b'hg foo')
21 21 > def foo(ui, *args, **kwargs):
22 22 > foo = ui.config(b'tests', b'foo')
23 23 > ui.write(foo)
24 24 > ui.write(b"\\n")
25 25 > @command(b'bar', [], b'hg bar', norepo=True)
26 26 > def bar(ui, *args, **kwargs):
27 27 > ui.write(b"Bar\\n")
28 28 > EOF
29 29 $ abspath=`pwd`/foobar.py
30 30
31 31 $ mkdir barfoo
32 32 $ cp foobar.py barfoo/__init__.py
33 33 $ barfoopath=`pwd`/barfoo
34 34
35 35 $ hg init a
36 36 $ cd a
37 37 $ echo foo > file
38 38 $ hg add file
39 39 $ hg commit -m 'add file'
40 40
41 41 $ echo '[extensions]' >> $HGRCPATH
42 42 $ echo "foobar = $abspath" >> $HGRCPATH
43 43
44 $ filterlog () {
45 > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!'
46 > }
47
44 48 Test extension setup timings
45 49
46 $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1
47 debug.extensions: loading extensions
48 debug.extensions: - processing 1 entries
49 debug.extensions: - loading extension: foobar
50 debug.extensions: > foobar extension loaded in * (glob)
51 debug.extensions: - validating extension tables: foobar
52 debug.extensions: - invoking registered callbacks: foobar
53 debug.extensions: > callbacks completed in * (glob)
54 debug.extensions: > loaded 1 extensions, total time * (glob)
55 debug.extensions: - loading configtable attributes
56 debug.extensions: - executing uisetup hooks
57 debug.extensions: - running uisetup for foobar
50 $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1 | filterlog
51 YYYY/MM/DD HH:MM:SS (PID)> loading extensions
52 YYYY/MM/DD HH:MM:SS (PID)> - processing 1 entries
53 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: foobar
54 YYYY/MM/DD HH:MM:SS (PID)> > foobar extension loaded in * (glob)
55 YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: foobar
56 YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: foobar
57 YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
58 YYYY/MM/DD HH:MM:SS (PID)> > loaded 1 extensions, total time * (glob)
59 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
60 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
61 YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for foobar
58 62 uisetup called [debug]
59 63 uisetup called
60 64 uisetup called [status]
61 debug.extensions: > uisetup for foobar took * (glob)
62 debug.extensions: > all uisetup took * (glob)
63 debug.extensions: - executing extsetup hooks
64 debug.extensions: - running extsetup for foobar
65 debug.extensions: > extsetup for foobar took * (glob)
66 debug.extensions: > all extsetup took * (glob)
67 debug.extensions: - executing remaining aftercallbacks
68 debug.extensions: > remaining aftercallbacks completed in * (glob)
69 debug.extensions: - loading extension registration objects
70 debug.extensions: > extension registration object loading took * (glob)
71 debug.extensions: > extension foobar take a total of * to load (glob)
72 debug.extensions: extension loading complete
73 debug.extensions: loading additional extensions
74 debug.extensions: - processing 1 entries
75 debug.extensions: > loaded 0 extensions, total time * (glob)
76 debug.extensions: - loading configtable attributes
77 debug.extensions: - executing uisetup hooks
78 debug.extensions: > all uisetup took * (glob)
79 debug.extensions: - executing extsetup hooks
80 debug.extensions: > all extsetup took * (glob)
81 debug.extensions: - executing remaining aftercallbacks
82 debug.extensions: > remaining aftercallbacks completed in * (glob)
83 debug.extensions: - loading extension registration objects
84 debug.extensions: > extension registration object loading took * (glob)
85 debug.extensions: extension loading complete
86 debug.extensions: - executing reposetup hooks
87 debug.extensions: - running reposetup for foobar
65 YYYY/MM/DD HH:MM:SS (PID)> > uisetup for foobar took * (glob)
66 YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
67 YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
68 YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for foobar
69 YYYY/MM/DD HH:MM:SS (PID)> > extsetup for foobar took * (glob)
70 YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
71 YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
72 YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
73 YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
74 YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
75 YYYY/MM/DD HH:MM:SS (PID)> > extension foobar take a total of * to load (glob)
76 YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
77 YYYY/MM/DD HH:MM:SS (PID)> loading additional extensions
78 YYYY/MM/DD HH:MM:SS (PID)> - processing 1 entries
79 YYYY/MM/DD HH:MM:SS (PID)> > loaded 0 extensions, total time * (glob)
80 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
81 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
82 YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
83 YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
84 YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
85 YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
86 YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
87 YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
88 YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
89 YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
90 YYYY/MM/DD HH:MM:SS (PID)> - executing reposetup hooks
91 YYYY/MM/DD HH:MM:SS (PID)> - running reposetup for foobar
88 92 reposetup called for a
89 93 ui == repo.ui
90 debug.extensions: > reposetup for foobar took * (glob)
91 debug.extensions: > all reposetup took * (glob)
94 YYYY/MM/DD HH:MM:SS (PID)> > reposetup for foobar took * (glob)
95 YYYY/MM/DD HH:MM:SS (PID)> > all reposetup took * (glob)
92 96 Foo
93 97
94 98 $ cd ..
95 99
96 100 $ echo 'foobar = !' >> $HGRCPATH
General Comments 0
You need to be logged in to leave comments. Login now