##// END OF EJS Templates
extensions: use ui.log() interface to provide detailed loading information...
Yuya Nishihara -
r41032:6f2510b5 default
parent child Browse files
Show More
@@ -1,843 +1,844 b''
1 # extensions.py - extension handling for mercurial
1 # extensions.py - extension handling for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import ast
10 import ast
11 import collections
11 import collections
12 import functools
12 import functools
13 import imp
13 import imp
14 import inspect
14 import inspect
15 import os
15 import os
16
16
17 from .i18n import (
17 from .i18n import (
18 _,
18 _,
19 gettext,
19 gettext,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 cmdutil,
23 cmdutil,
24 configitems,
24 configitems,
25 error,
25 error,
26 pycompat,
26 pycompat,
27 util,
27 util,
28 )
28 )
29
29
30 from .utils import (
30 from .utils import (
31 stringutil,
31 stringutil,
32 )
32 )
33
33
34 _extensions = {}
34 _extensions = {}
35 _disabledextensions = {}
35 _disabledextensions = {}
36 _aftercallbacks = {}
36 _aftercallbacks = {}
37 _order = []
37 _order = []
38 _builtin = {
38 _builtin = {
39 'hbisect',
39 'hbisect',
40 'bookmarks',
40 'bookmarks',
41 'color',
41 'color',
42 'parentrevspec',
42 'parentrevspec',
43 'progress',
43 'progress',
44 'interhg',
44 'interhg',
45 'inotify',
45 'inotify',
46 'hgcia'
46 'hgcia'
47 }
47 }
48
48
def extensions(ui=None):
    """Yield (name, module) pairs for loaded extensions, in load order.

    When *ui* is given, only extensions still enabled in its configuration
    are yielded; without a ui every successfully loaded extension is
    reported.
    """
    if not ui:
        enabled = lambda name: True
    else:
        def enabled(name):
            # an extension may be configured under its short or full name
            for fmt in ['%s', 'hgext.%s']:
                conf = ui.config('extensions', fmt % name)
                if conf is not None and not conf.startswith('!'):
                    return True
    for name in _order:
        mod = _extensions[name]
        if mod and enabled(name):
            yield name, mod
62
62
def find(name):
    '''return module with given extension name'''
    try:
        mod = _extensions[name]
    except KeyError:
        # also accept the trailing component of a dotted or slashed name
        tails = ('.' + name, '/' + name)
        mod = next((v for k, v in _extensions.iteritems()
                    if k.endswith(tails)), None)
    # a registered-but-failed extension is stored as None; treat it as absent
    if not mod:
        raise KeyError(name)
    return mod
76
76
def loadpath(path, module_name):
    """Load and return the module found at *path* under *module_name*.

    Dots in *module_name* are flattened to underscores so the loaded module
    cannot be confused with a member of a real package.
    """
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    module_name = pycompat.fsdecode(module_name)
    path = pycompat.fsdecode(path)
    if os.path.isdir(path):
        # module/__init__.py style
        d, f = os.path.split(path)
        fd, fpath, desc = imp.find_module(f, [d])
        return imp.load_module(module_name, fd, fpath, desc)
    try:
        return imp.load_source(module_name, path)
    except IOError as exc:
        if not exc.filename:
            exc.filename = path # python does not fill this
        raise
94
94
def _importh(name):
    """import and return the <name> module"""
    mod = __import__(pycompat.sysstr(name))
    # __import__ returns the top-level package; walk down to the leaf module
    for comp in name.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod
102
102
def _importext(name, path=None, reportfunc=None):
    """Import extension *name*, trying hgext.*, hgext3rd.*, then the bare name.

    When *path* is given, the module is loaded from that file instead.
    *reportfunc*, if provided, is invoked as (err, failedname, nextname)
    after each failed import attempt.
    """
    if path:
        # the module will be loaded in sys.modules
        # choose an unique name so that it doesn't
        # conflicts with other modules
        return loadpath(path, 'hgext.%s' % name)
    try:
        return _importh("hgext.%s" % name)
    except ImportError as err:
        if reportfunc:
            reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
        try:
            return _importh("hgext3rd.%s" % name)
        except ImportError as err:
            if reportfunc:
                reportfunc(err, "hgext3rd.%s" % name, name)
            return _importh(name)
122
122
def _reportimporterror(ui, err, failed, next):
    """Log that importing *failed* raised *err* and that *next* will be tried."""
    # note: this ui.log happens before --debug is processed,
    # Use --config ui.debug=1 to see them.
    ui.log(b'extension', b' - could not import %s (%s): trying %s\n',
           failed, stringutil.forcebytestr(err), next)
    if ui.debugflag and ui.configbool('devel', 'debug.extensions'):
        ui.traceback()
132 def _rejectunicode(name, xs):
131 def _rejectunicode(name, xs):
133 if isinstance(xs, (list, set, tuple)):
132 if isinstance(xs, (list, set, tuple)):
134 for x in xs:
133 for x in xs:
135 _rejectunicode(name, x)
134 _rejectunicode(name, x)
136 elif isinstance(xs, dict):
135 elif isinstance(xs, dict):
137 for k, v in xs.items():
136 for k, v in xs.items():
138 _rejectunicode(name, k)
137 _rejectunicode(name, k)
139 _rejectunicode(b'%s.%s' % (name, stringutil.forcebytestr(k)), v)
138 _rejectunicode(b'%s.%s' % (name, stringutil.forcebytestr(k)), v)
140 elif isinstance(xs, type(u'')):
139 elif isinstance(xs, type(u'')):
141 raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
140 raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
142 hint="use b'' to make it byte string")
141 hint="use b'' to make it byte string")
143
142
# attributes set by registrar.command
_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')

def _validatecmdtable(ui, cmdtable):
    """Check if extension commands have required attributes"""
    for cmd, entry in cmdtable.iteritems():
        func = entry[0]
        missing = [a for a in _cmdfuncattrs if not util.safehasattr(func, a)]
        if missing:
            # the registrar.command decorator is what normally sets these
            raise error.ProgrammingError(
                'missing attributes: %s' % ', '.join(missing),
                hint="use @command decorator to register '%s'" % cmd)
157
156
def _validatetables(ui, mod):
    """Sanity check for loadable tables provided by extension module"""
    # plain dict tables are checked directly ...
    for t in ['cmdtable', 'colortable', 'configtable']:
        _rejectunicode(t, getattr(mod, t, {}))
    # ... registrar-style tables expose their entries via _table
    for t in ['filesetpredicate', 'internalmerge', 'revsetpredicate',
              'templatefilter', 'templatefunc', 'templatekeyword']:
        registrarobj = getattr(mod, t, None)
        if registrarobj:
            _rejectunicode(t, registrarobj._table)
    _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
168
167
def load(ui, name, path, loadingtime=None):
    """Load and return the extension *name* (from *path* when given).

    Returns None for built-in extensions and the cached module when the
    extension is already loaded.  When a *loadingtime* mapping is supplied,
    the per-extension elapsed import time is accumulated into it.
    """
    if name.startswith(('hgext.', 'hgext/')):
        shortname = name[6:]
    else:
        shortname = name
    if shortname in _builtin:
        return None
    if shortname in _extensions:
        return _extensions[shortname]
    ui.log(b'extension', b' - loading extension: %s\n', shortname)
    # register a placeholder so a failed/cyclic load is visible as None
    _extensions[shortname] = None
    with util.timedcm('load extension %s', shortname) as stats:
        mod = _importext(name, path, bind(_reportimporterror, ui))
    ui.log(b'extension', b' > %s extension loaded in %s\n', shortname, stats)
    if loadingtime is not None:
        loadingtime[shortname] += stats.elapsed

    # Before we do anything with the extension, check against minimum stated
    # compatibility. This gives extension authors a mechanism to have their
    # extensions short circuit when loaded with a known incompatible version
    # of Mercurial.
    minver = getattr(mod, 'minimumhgversion', None)
    if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
        msg = _('(third party extension %s requires version %s or newer '
                'of Mercurial (current: %s); disabling)\n')
        ui.warn(msg % (shortname, minver, util.version()))
        return
    ui.log(b'extension', b' - validating extension tables: %s\n', shortname)
    _validatetables(ui, mod)

    _extensions[shortname] = mod
    _order.append(shortname)
    ui.log(b'extension', b' - invoking registered callbacks: %s\n',
           shortname)
    with util.timedcm('callbacks extension %s', shortname) as stats:
        for fn in _aftercallbacks.get(shortname, []):
            fn(loaded=True)
    ui.log(b'extension', b' > callbacks completed in %s\n', stats)
    return mod
207
207
def _runuisetup(name, ui):
    """Run the named extension's uisetup hook; return False on failure."""
    uisetup = getattr(_extensions[name], 'uisetup', None)
    if not uisetup:
        # nothing to run counts as success
        return True
    try:
        uisetup(ui)
    except Exception as inst:
        ui.traceback(force=True)
        msg = stringutil.forcebytestr(inst)
        ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
        return False
    return True
219
219
def _runextsetup(name, ui):
    """Run the named extension's extsetup hook; return False on failure."""
    extsetup = getattr(_extensions[name], 'extsetup', None)
    if not extsetup:
        # nothing to run counts as success
        return True
    try:
        try:
            extsetup(ui)
        except TypeError:
            # a TypeError from inside a modern extsetup must propagate
            if pycompat.getargspec(extsetup).args:
                raise
            extsetup() # old extsetup with no ui argument
    except Exception as inst:
        ui.traceback(force=True)
        msg = stringutil.forcebytestr(inst)
        ui.warn(_("*** failed to set up extension %s: %s\n") % (name, msg))
        return False
    return True
236
236
def loadall(ui, whitelist=None):
    """Load every extension enabled in *ui*'s configuration.

    When *whitelist* is given, only the named extensions are considered.
    Loads the modules, then runs uisetup and extsetup hooks, fires
    registered after-load callbacks, and installs the registration tables
    (commands, colors, predicates, template items, ...).
    """
    loadingtime = collections.defaultdict(int)
    result = ui.configitems("extensions")
    if whitelist is not None:
        result = [(k, v) for (k, v) in result if k in whitelist]
    newindex = len(_order)
    ui.log(b'extension', b'loading %sextensions\n',
           'additional ' if newindex else '')
    ui.log(b'extension', b'- processing %d entries\n', len(result))
    with util.timedcm('load all extensions') as stats:
        for (name, path) in result:
            if path:
                # a leading '!' disables the extension but remembers its path
                if path[0:1] == '!':
                    if name not in _disabledextensions:
                        ui.log(b'extension',
                               b' - skipping disabled extension: %s\n', name)
                    _disabledextensions[name] = path[1:]
                    continue
            try:
                load(ui, name, path, loadingtime)
            except Exception as inst:
                msg = stringutil.forcebytestr(inst)
                if path:
                    ui.warn(_("*** failed to import extension %s from %s: %s\n")
                            % (name, path, msg))
                else:
                    ui.warn(_("*** failed to import extension %s: %s\n")
                            % (name, msg))
                if isinstance(inst, error.Hint) and inst.hint:
                    ui.warn(_("*** (%s)\n") % inst.hint)
                ui.traceback()

    ui.log(b'extension', b'> loaded %d extensions, total time %s\n',
           len(_order) - newindex, stats)
    # list of (objname, loadermod, loadername) tuple:
    # - objname is the name of an object in extension module,
    #   from which extra information is loaded
    # - loadermod is the module where loader is placed
    # - loadername is the name of the function,
    #   which takes (ui, extensionname, extraobj) arguments
    #
    # This one is for the list of item that must be run before running any setup
    earlyextraloaders = [
        ('configtable', configitems, 'loadconfigtable'),
    ]

    ui.log(b'extension', b'- loading configtable attributes\n')
    _loadextra(ui, newindex, earlyextraloaders)

    broken = set()
    ui.log(b'extension', b'- executing uisetup hooks\n')
    with util.timedcm('all uisetup') as alluisetupstats:
        for name in _order[newindex:]:
            ui.log(b'extension', b' - running uisetup for %s\n', name)
            with util.timedcm('uisetup %s', name) as stats:
                if not _runuisetup(name, ui):
                    ui.log(b'extension',
                           b' - the %s extension uisetup failed\n', name)
                    broken.add(name)
            ui.log(b'extension', b' > uisetup for %s took %s\n', name, stats)
            loadingtime[name] += stats.elapsed
    ui.log(b'extension', b'> all uisetup took %s\n', alluisetupstats)

    ui.log(b'extension', b'- executing extsetup hooks\n')
    with util.timedcm('all extsetup') as allextetupstats:
        for name in _order[newindex:]:
            if name in broken:
                continue
            ui.log(b'extension', b' - running extsetup for %s\n', name)
            with util.timedcm('extsetup %s', name) as stats:
                if not _runextsetup(name, ui):
                    ui.log(b'extension',
                           b' - the %s extension extsetup failed\n', name)
                    broken.add(name)
            ui.log(b'extension', b' > extsetup for %s took %s\n', name, stats)
            loadingtime[name] += stats.elapsed
    ui.log(b'extension', b'> all extsetup took %s\n', allextetupstats)

    # a broken extension must not be consulted by later machinery
    for name in broken:
        ui.log(b'extension', b' - disabling broken %s extension\n', name)
        _extensions[name] = None

    # Call aftercallbacks that were never met.
    ui.log(b'extension', b'- executing remaining aftercallbacks\n')
    with util.timedcm('aftercallbacks') as stats:
        for shortname in _aftercallbacks:
            if shortname in _extensions:
                continue

            for fn in _aftercallbacks[shortname]:
                ui.log(b'extension',
                       b' - extension %s not loaded, notify callbacks\n',
                       shortname)
                fn(loaded=False)
    ui.log(b'extension', b'> remaining aftercallbacks completed in %s\n', stats)

    # loadall() is called multiple times and lingering _aftercallbacks
    # entries could result in double execution. See issue4646.
    _aftercallbacks.clear()

    # delay importing avoids cyclic dependency (especially commands)
    from . import (
        color,
        commands,
        filemerge,
        fileset,
        revset,
        templatefilters,
        templatefuncs,
        templatekw,
    )

    # list of (objname, loadermod, loadername) tuple:
    # - objname is the name of an object in extension module,
    #   from which extra information is loaded
    # - loadermod is the module where loader is placed
    # - loadername is the name of the function,
    #   which takes (ui, extensionname, extraobj) arguments
    ui.log(b'extension', b'- loading extension registration objects\n')
    extraloaders = [
        ('cmdtable', commands, 'loadcmdtable'),
        ('colortable', color, 'loadcolortable'),
        ('filesetpredicate', fileset, 'loadpredicate'),
        ('internalmerge', filemerge, 'loadinternalmerge'),
        ('revsetpredicate', revset, 'loadpredicate'),
        ('templatefilter', templatefilters, 'loadfilter'),
        ('templatefunc', templatefuncs, 'loadfunction'),
        ('templatekeyword', templatekw, 'loadkeyword'),
    ]
    with util.timedcm('load registration objects') as stats:
        _loadextra(ui, newindex, extraloaders)
    ui.log(b'extension', b'> extension registration object loading took %s\n',
           stats)

    # Report per extension loading time (except reposetup)
    for name in sorted(loadingtime):
        ui.log(b'extension', b'> extension %s take a total of %s to load\n',
               name, util.timecount(loadingtime[name]))

    ui.log(b'extension', b'extension loading complete\n')
376
377
def _loadextra(ui, newindex, extraloaders):
    """Run registration-table loaders for extensions loaded after *newindex*.

    Each entry in *extraloaders* is (objname, loadermod, loadername); for
    every newly loaded extension that defines ``objname``, call
    ``loadermod.loadername(ui, name, extraobj)``.
    """
    for name in _order[newindex:]:
        module = _extensions[name]
        if not module:
            continue # loading this module failed
        for objname, loadermod, loadername in extraloaders:
            extraobj = getattr(module, objname, None)
            if extraobj is not None:
                getattr(loadermod, loadername)(ui, name, extraobj)
387
388
def afterloaded(extension, callback):
    '''Run the specified function after a named extension is loaded.

    If the named extension is already loaded, the callback will be called
    immediately.

    If the named extension never loads, the callback will be called after
    all extensions have been loaded.

    The callback receives the named argument ``loaded``, which is a boolean
    indicating whether the dependent extension actually loaded.
    '''
    if extension not in _extensions:
        # not loaded yet: remember the callback for later
        _aftercallbacks.setdefault(extension, []).append(callback)
        return
    # Report loaded as False if the extension is disabled
    callback(loaded=_extensions[extension] is not None)
407
408
def populateui(ui):
    """Run extension hooks on the given ui to populate additional members,
    extend the class dynamically, etc.

    This will be called after the configuration is loaded, and/or extensions
    are loaded. In general, it's once per ui instance, but in command-server
    and hgweb, this may be called more than once with the same ui.
    """
    for name, mod in extensions(ui):
        hook = getattr(mod, 'uipopulate', None)
        if not hook:
            continue
        try:
            hook(ui)
        except Exception as inst:
            # a broken uipopulate must not abort the whole command
            ui.traceback(force=True)
            ui.warn(_('*** failed to populate ui by extension %s: %s\n')
                    % (name, stringutil.forcebytestr(inst)))
426
427
def bind(func, *args):
    '''Partial function application

    Returns a new function that calls func with *args* prepended to the
    positional arguments supplied at call time.  Keyword arguments given
    at call time are passed through to func unchanged; they cannot be
    pre-bound here (the original docstring wrongly implied they could).
    For example,

    f(1, 2, bar=3) === bind(f, 1)(2, bar=3)'''
    assert callable(func)
    def closure(*a, **kw):
        return func(*(args + a), **kw)
    return closure
438
439
439 def _updatewrapper(wrap, origfn, unboundwrapper):
440 def _updatewrapper(wrap, origfn, unboundwrapper):
440 '''Copy and add some useful attributes to wrapper'''
441 '''Copy and add some useful attributes to wrapper'''
441 try:
442 try:
442 wrap.__name__ = origfn.__name__
443 wrap.__name__ = origfn.__name__
443 except AttributeError:
444 except AttributeError:
444 pass
445 pass
445 wrap.__module__ = getattr(origfn, '__module__')
446 wrap.__module__ = getattr(origfn, '__module__')
446 wrap.__doc__ = getattr(origfn, '__doc__')
447 wrap.__doc__ = getattr(origfn, '__doc__')
447 wrap.__dict__.update(getattr(origfn, '__dict__', {}))
448 wrap.__dict__.update(getattr(origfn, '__dict__', {}))
448 wrap._origfunc = origfn
449 wrap._origfunc = origfn
449 wrap._unboundwrapper = unboundwrapper
450 wrap._unboundwrapper = unboundwrapper
450
451
def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
    '''Wrap the command named `command' in table

    Replace command in the command table with wrapper. The wrapped command will
    be inserted into the command table specified by the table argument.

    The wrapper will be called like

        wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Optionally append to the command synopsis and docstring, used for help.
    For example, if your extension wraps the ``bookmarks`` command to add the
    flags ``--remote`` and ``--all`` you might call this function like so:

    synopsis = ' [-a] [--remote]'
    docstring = """

    The ``remotenames`` extension adds the ``--remote`` and ``--all`` (``-a``)
    flags to the bookmarks command. Either flag will show the remote bookmarks
    known to the repository; ``--remote`` will also suppress the output of the
    local bookmarks.
    """

    extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks,
                           synopsis, docstring)
    '''
    assert callable(wrapper)
    aliases, entry = cmdutil.findcmd(command, table)
    # findcmd gave us the entry; recover the exact key used in the table
    for alias, candidate in table.iteritems():
        if candidate is entry:
            key = alias
            break

    origfn = entry[0]
    wrap = functools.partial(util.checksignature(wrapper),
                             util.checksignature(origfn))
    _updatewrapper(wrap, origfn, wrapper)
    if docstring is not None:
        wrap.__doc__ += docstring

    # rebuild the entry with the wrapper in place of the original function
    newentry = list(entry)
    newentry[0] = wrap
    if synopsis is not None:
        newentry[2] += synopsis
    table[key] = tuple(newentry)
    return entry
500
501
501 def wrapfilecache(cls, propname, wrapper):
502 def wrapfilecache(cls, propname, wrapper):
502 """Wraps a filecache property.
503 """Wraps a filecache property.
503
504
504 These can't be wrapped using the normal wrapfunction.
505 These can't be wrapped using the normal wrapfunction.
505 """
506 """
506 propname = pycompat.sysstr(propname)
507 propname = pycompat.sysstr(propname)
507 assert callable(wrapper)
508 assert callable(wrapper)
508 for currcls in cls.__mro__:
509 for currcls in cls.__mro__:
509 if propname in currcls.__dict__:
510 if propname in currcls.__dict__:
510 origfn = currcls.__dict__[propname].func
511 origfn = currcls.__dict__[propname].func
511 assert callable(origfn)
512 assert callable(origfn)
512 def wrap(*args, **kwargs):
513 def wrap(*args, **kwargs):
513 return wrapper(origfn, *args, **kwargs)
514 return wrapper(origfn, *args, **kwargs)
514 currcls.__dict__[propname].func = wrap
515 currcls.__dict__[propname].func = wrap
515 break
516 break
516
517
517 if currcls is object:
518 if currcls is object:
518 raise AttributeError(r"type '%s' has no property '%s'" % (
519 raise AttributeError(r"type '%s' has no property '%s'" % (
519 cls, propname))
520 cls, propname))
520
521
521 class wrappedfunction(object):
522 class wrappedfunction(object):
522 '''context manager for temporarily wrapping a function'''
523 '''context manager for temporarily wrapping a function'''
523
524
524 def __init__(self, container, funcname, wrapper):
525 def __init__(self, container, funcname, wrapper):
525 assert callable(wrapper)
526 assert callable(wrapper)
526 self._container = container
527 self._container = container
527 self._funcname = funcname
528 self._funcname = funcname
528 self._wrapper = wrapper
529 self._wrapper = wrapper
529
530
530 def __enter__(self):
531 def __enter__(self):
531 wrapfunction(self._container, self._funcname, self._wrapper)
532 wrapfunction(self._container, self._funcname, self._wrapper)
532
533
533 def __exit__(self, exctype, excvalue, traceback):
534 def __exit__(self, exctype, excvalue, traceback):
534 unwrapfunction(self._container, self._funcname, self._wrapper)
535 unwrapfunction(self._container, self._funcname, self._wrapper)
535
536
536 def wrapfunction(container, funcname, wrapper):
537 def wrapfunction(container, funcname, wrapper):
537 '''Wrap the function named funcname in container
538 '''Wrap the function named funcname in container
538
539
539 Replace the funcname member in the given container with the specified
540 Replace the funcname member in the given container with the specified
540 wrapper. The container is typically a module, class, or instance.
541 wrapper. The container is typically a module, class, or instance.
541
542
542 The wrapper will be called like
543 The wrapper will be called like
543
544
544 wrapper(orig, *args, **kwargs)
545 wrapper(orig, *args, **kwargs)
545
546
546 where orig is the original (wrapped) function, and *args, **kwargs
547 where orig is the original (wrapped) function, and *args, **kwargs
547 are the arguments passed to it.
548 are the arguments passed to it.
548
549
549 Wrapping methods of the repository object is not recommended since
550 Wrapping methods of the repository object is not recommended since
550 it conflicts with extensions that extend the repository by
551 it conflicts with extensions that extend the repository by
551 subclassing. All extensions that need to extend methods of
552 subclassing. All extensions that need to extend methods of
552 localrepository should use this subclassing trick: namely,
553 localrepository should use this subclassing trick: namely,
553 reposetup() should look like
554 reposetup() should look like
554
555
555 def reposetup(ui, repo):
556 def reposetup(ui, repo):
556 class myrepo(repo.__class__):
557 class myrepo(repo.__class__):
557 def whatever(self, *args, **kwargs):
558 def whatever(self, *args, **kwargs):
558 [...extension stuff...]
559 [...extension stuff...]
559 super(myrepo, self).whatever(*args, **kwargs)
560 super(myrepo, self).whatever(*args, **kwargs)
560 [...extension stuff...]
561 [...extension stuff...]
561
562
562 repo.__class__ = myrepo
563 repo.__class__ = myrepo
563
564
564 In general, combining wrapfunction() with subclassing does not
565 In general, combining wrapfunction() with subclassing does not
565 work. Since you cannot control what other extensions are loaded by
566 work. Since you cannot control what other extensions are loaded by
566 your end users, you should play nicely with others by using the
567 your end users, you should play nicely with others by using the
567 subclass trick.
568 subclass trick.
568 '''
569 '''
569 assert callable(wrapper)
570 assert callable(wrapper)
570
571
571 origfn = getattr(container, funcname)
572 origfn = getattr(container, funcname)
572 assert callable(origfn)
573 assert callable(origfn)
573 if inspect.ismodule(container):
574 if inspect.ismodule(container):
574 # origfn is not an instance or class method. "partial" can be used.
575 # origfn is not an instance or class method. "partial" can be used.
575 # "partial" won't insert a frame in traceback.
576 # "partial" won't insert a frame in traceback.
576 wrap = functools.partial(wrapper, origfn)
577 wrap = functools.partial(wrapper, origfn)
577 else:
578 else:
578 # "partial" cannot be safely used. Emulate its effect by using "bind".
579 # "partial" cannot be safely used. Emulate its effect by using "bind".
579 # The downside is one more frame in traceback.
580 # The downside is one more frame in traceback.
580 wrap = bind(wrapper, origfn)
581 wrap = bind(wrapper, origfn)
581 _updatewrapper(wrap, origfn, wrapper)
582 _updatewrapper(wrap, origfn, wrapper)
582 setattr(container, funcname, wrap)
583 setattr(container, funcname, wrap)
583 return origfn
584 return origfn
584
585
585 def unwrapfunction(container, funcname, wrapper=None):
586 def unwrapfunction(container, funcname, wrapper=None):
586 '''undo wrapfunction
587 '''undo wrapfunction
587
588
588 If wrappers is None, undo the last wrap. Otherwise removes the wrapper
589 If wrappers is None, undo the last wrap. Otherwise removes the wrapper
589 from the chain of wrappers.
590 from the chain of wrappers.
590
591
591 Return the removed wrapper.
592 Return the removed wrapper.
592 Raise IndexError if wrapper is None and nothing to unwrap; ValueError if
593 Raise IndexError if wrapper is None and nothing to unwrap; ValueError if
593 wrapper is not None but is not found in the wrapper chain.
594 wrapper is not None but is not found in the wrapper chain.
594 '''
595 '''
595 chain = getwrapperchain(container, funcname)
596 chain = getwrapperchain(container, funcname)
596 origfn = chain.pop()
597 origfn = chain.pop()
597 if wrapper is None:
598 if wrapper is None:
598 wrapper = chain[0]
599 wrapper = chain[0]
599 chain.remove(wrapper)
600 chain.remove(wrapper)
600 setattr(container, funcname, origfn)
601 setattr(container, funcname, origfn)
601 for w in reversed(chain):
602 for w in reversed(chain):
602 wrapfunction(container, funcname, w)
603 wrapfunction(container, funcname, w)
603 return wrapper
604 return wrapper
604
605
605 def getwrapperchain(container, funcname):
606 def getwrapperchain(container, funcname):
606 '''get a chain of wrappers of a function
607 '''get a chain of wrappers of a function
607
608
608 Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]
609 Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]
609
610
610 The wrapper functions are the ones passed to wrapfunction, whose first
611 The wrapper functions are the ones passed to wrapfunction, whose first
611 argument is origfunc.
612 argument is origfunc.
612 '''
613 '''
613 result = []
614 result = []
614 fn = getattr(container, funcname)
615 fn = getattr(container, funcname)
615 while fn:
616 while fn:
616 assert callable(fn)
617 assert callable(fn)
617 result.append(getattr(fn, '_unboundwrapper', fn))
618 result.append(getattr(fn, '_unboundwrapper', fn))
618 fn = getattr(fn, '_origfunc', None)
619 fn = getattr(fn, '_origfunc', None)
619 return result
620 return result
620
621
621 def _disabledpaths():
622 def _disabledpaths():
622 '''find paths of disabled extensions. returns a dict of {name: path}'''
623 '''find paths of disabled extensions. returns a dict of {name: path}'''
623 import hgext
624 import hgext
624 extpath = os.path.dirname(
625 extpath = os.path.dirname(
625 os.path.abspath(pycompat.fsencode(hgext.__file__)))
626 os.path.abspath(pycompat.fsencode(hgext.__file__)))
626 try: # might not be a filesystem path
627 try: # might not be a filesystem path
627 files = os.listdir(extpath)
628 files = os.listdir(extpath)
628 except OSError:
629 except OSError:
629 return {}
630 return {}
630
631
631 exts = {}
632 exts = {}
632 for e in files:
633 for e in files:
633 if e.endswith('.py'):
634 if e.endswith('.py'):
634 name = e.rsplit('.', 1)[0]
635 name = e.rsplit('.', 1)[0]
635 path = os.path.join(extpath, e)
636 path = os.path.join(extpath, e)
636 else:
637 else:
637 name = e
638 name = e
638 path = os.path.join(extpath, e, '__init__.py')
639 path = os.path.join(extpath, e, '__init__.py')
639 if not os.path.exists(path):
640 if not os.path.exists(path):
640 continue
641 continue
641 if name in exts or name in _order or name == '__init__':
642 if name in exts or name in _order or name == '__init__':
642 continue
643 continue
643 exts[name] = path
644 exts[name] = path
644 for name, path in _disabledextensions.iteritems():
645 for name, path in _disabledextensions.iteritems():
645 # If no path was provided for a disabled extension (e.g. "color=!"),
646 # If no path was provided for a disabled extension (e.g. "color=!"),
646 # don't replace the path we already found by the scan above.
647 # don't replace the path we already found by the scan above.
647 if path:
648 if path:
648 exts[name] = path
649 exts[name] = path
649 return exts
650 return exts
650
651
651 def _moduledoc(file):
652 def _moduledoc(file):
652 '''return the top-level python documentation for the given file
653 '''return the top-level python documentation for the given file
653
654
654 Loosely inspired by pydoc.source_synopsis(), but rewritten to
655 Loosely inspired by pydoc.source_synopsis(), but rewritten to
655 handle triple quotes and to return the whole text instead of just
656 handle triple quotes and to return the whole text instead of just
656 the synopsis'''
657 the synopsis'''
657 result = []
658 result = []
658
659
659 line = file.readline()
660 line = file.readline()
660 while line[:1] == '#' or not line.strip():
661 while line[:1] == '#' or not line.strip():
661 line = file.readline()
662 line = file.readline()
662 if not line:
663 if not line:
663 break
664 break
664
665
665 start = line[:3]
666 start = line[:3]
666 if start == '"""' or start == "'''":
667 if start == '"""' or start == "'''":
667 line = line[3:]
668 line = line[3:]
668 while line:
669 while line:
669 if line.rstrip().endswith(start):
670 if line.rstrip().endswith(start):
670 line = line.split(start)[0]
671 line = line.split(start)[0]
671 if line:
672 if line:
672 result.append(line)
673 result.append(line)
673 break
674 break
674 elif not line:
675 elif not line:
675 return None # unmatched delimiter
676 return None # unmatched delimiter
676 result.append(line)
677 result.append(line)
677 line = file.readline()
678 line = file.readline()
678 else:
679 else:
679 return None
680 return None
680
681
681 return ''.join(result)
682 return ''.join(result)
682
683
683 def _disabledhelp(path):
684 def _disabledhelp(path):
684 '''retrieve help synopsis of a disabled extension (without importing)'''
685 '''retrieve help synopsis of a disabled extension (without importing)'''
685 try:
686 try:
686 with open(path, 'rb') as src:
687 with open(path, 'rb') as src:
687 doc = _moduledoc(src)
688 doc = _moduledoc(src)
688 except IOError:
689 except IOError:
689 return
690 return
690
691
691 if doc: # extracting localized synopsis
692 if doc: # extracting localized synopsis
692 return gettext(doc)
693 return gettext(doc)
693 else:
694 else:
694 return _('(no help text available)')
695 return _('(no help text available)')
695
696
696 def disabled():
697 def disabled():
697 '''find disabled extensions from hgext. returns a dict of {name: desc}'''
698 '''find disabled extensions from hgext. returns a dict of {name: desc}'''
698 try:
699 try:
699 from hgext import __index__
700 from hgext import __index__
700 return dict((name, gettext(desc))
701 return dict((name, gettext(desc))
701 for name, desc in __index__.docs.iteritems()
702 for name, desc in __index__.docs.iteritems()
702 if name not in _order)
703 if name not in _order)
703 except (ImportError, AttributeError):
704 except (ImportError, AttributeError):
704 pass
705 pass
705
706
706 paths = _disabledpaths()
707 paths = _disabledpaths()
707 if not paths:
708 if not paths:
708 return {}
709 return {}
709
710
710 exts = {}
711 exts = {}
711 for name, path in paths.iteritems():
712 for name, path in paths.iteritems():
712 doc = _disabledhelp(path)
713 doc = _disabledhelp(path)
713 if doc:
714 if doc:
714 exts[name] = doc.splitlines()[0]
715 exts[name] = doc.splitlines()[0]
715
716
716 return exts
717 return exts
717
718
718 def disabledext(name):
719 def disabledext(name):
719 '''find a specific disabled extension from hgext. returns desc'''
720 '''find a specific disabled extension from hgext. returns desc'''
720 try:
721 try:
721 from hgext import __index__
722 from hgext import __index__
722 if name in _order: # enabled
723 if name in _order: # enabled
723 return
724 return
724 else:
725 else:
725 return gettext(__index__.docs.get(name))
726 return gettext(__index__.docs.get(name))
726 except (ImportError, AttributeError):
727 except (ImportError, AttributeError):
727 pass
728 pass
728
729
729 paths = _disabledpaths()
730 paths = _disabledpaths()
730 if name in paths:
731 if name in paths:
731 return _disabledhelp(paths[name])
732 return _disabledhelp(paths[name])
732
733
733 def _walkcommand(node):
734 def _walkcommand(node):
734 """Scan @command() decorators in the tree starting at node"""
735 """Scan @command() decorators in the tree starting at node"""
735 todo = collections.deque([node])
736 todo = collections.deque([node])
736 while todo:
737 while todo:
737 node = todo.popleft()
738 node = todo.popleft()
738 if not isinstance(node, ast.FunctionDef):
739 if not isinstance(node, ast.FunctionDef):
739 todo.extend(ast.iter_child_nodes(node))
740 todo.extend(ast.iter_child_nodes(node))
740 continue
741 continue
741 for d in node.decorator_list:
742 for d in node.decorator_list:
742 if not isinstance(d, ast.Call):
743 if not isinstance(d, ast.Call):
743 continue
744 continue
744 if not isinstance(d.func, ast.Name):
745 if not isinstance(d.func, ast.Name):
745 continue
746 continue
746 if d.func.id != r'command':
747 if d.func.id != r'command':
747 continue
748 continue
748 yield d
749 yield d
749
750
750 def _disabledcmdtable(path):
751 def _disabledcmdtable(path):
751 """Construct a dummy command table without loading the extension module
752 """Construct a dummy command table without loading the extension module
752
753
753 This may raise IOError or SyntaxError.
754 This may raise IOError or SyntaxError.
754 """
755 """
755 with open(path, 'rb') as src:
756 with open(path, 'rb') as src:
756 root = ast.parse(src.read(), path)
757 root = ast.parse(src.read(), path)
757 cmdtable = {}
758 cmdtable = {}
758 for node in _walkcommand(root):
759 for node in _walkcommand(root):
759 if not node.args:
760 if not node.args:
760 continue
761 continue
761 a = node.args[0]
762 a = node.args[0]
762 if isinstance(a, ast.Str):
763 if isinstance(a, ast.Str):
763 name = pycompat.sysbytes(a.s)
764 name = pycompat.sysbytes(a.s)
764 elif pycompat.ispy3 and isinstance(a, ast.Bytes):
765 elif pycompat.ispy3 and isinstance(a, ast.Bytes):
765 name = a.s
766 name = a.s
766 else:
767 else:
767 continue
768 continue
768 cmdtable[name] = (None, [], b'')
769 cmdtable[name] = (None, [], b'')
769 return cmdtable
770 return cmdtable
770
771
771 def _finddisabledcmd(ui, cmd, name, path, strict):
772 def _finddisabledcmd(ui, cmd, name, path, strict):
772 try:
773 try:
773 cmdtable = _disabledcmdtable(path)
774 cmdtable = _disabledcmdtable(path)
774 except (IOError, SyntaxError):
775 except (IOError, SyntaxError):
775 return
776 return
776 try:
777 try:
777 aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
778 aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
778 except (error.AmbiguousCommand, error.UnknownCommand):
779 except (error.AmbiguousCommand, error.UnknownCommand):
779 return
780 return
780 for c in aliases:
781 for c in aliases:
781 if c.startswith(cmd):
782 if c.startswith(cmd):
782 cmd = c
783 cmd = c
783 break
784 break
784 else:
785 else:
785 cmd = aliases[0]
786 cmd = aliases[0]
786 doc = _disabledhelp(path)
787 doc = _disabledhelp(path)
787 return (cmd, name, doc)
788 return (cmd, name, doc)
788
789
789 def disabledcmd(ui, cmd, strict=False):
790 def disabledcmd(ui, cmd, strict=False):
790 '''find cmd from disabled extensions without importing.
791 '''find cmd from disabled extensions without importing.
791 returns (cmdname, extname, doc)'''
792 returns (cmdname, extname, doc)'''
792
793
793 paths = _disabledpaths()
794 paths = _disabledpaths()
794 if not paths:
795 if not paths:
795 raise error.UnknownCommand(cmd)
796 raise error.UnknownCommand(cmd)
796
797
797 ext = None
798 ext = None
798 # first, search for an extension with the same name as the command
799 # first, search for an extension with the same name as the command
799 path = paths.pop(cmd, None)
800 path = paths.pop(cmd, None)
800 if path:
801 if path:
801 ext = _finddisabledcmd(ui, cmd, cmd, path, strict=strict)
802 ext = _finddisabledcmd(ui, cmd, cmd, path, strict=strict)
802 if not ext:
803 if not ext:
803 # otherwise, interrogate each extension until there's a match
804 # otherwise, interrogate each extension until there's a match
804 for name, path in paths.iteritems():
805 for name, path in paths.iteritems():
805 ext = _finddisabledcmd(ui, cmd, name, path, strict=strict)
806 ext = _finddisabledcmd(ui, cmd, name, path, strict=strict)
806 if ext:
807 if ext:
807 break
808 break
808 if ext:
809 if ext:
809 return ext
810 return ext
810
811
811 raise error.UnknownCommand(cmd)
812 raise error.UnknownCommand(cmd)
812
813
813 def enabled(shortname=True):
814 def enabled(shortname=True):
814 '''return a dict of {name: desc} of extensions'''
815 '''return a dict of {name: desc} of extensions'''
815 exts = {}
816 exts = {}
816 for ename, ext in extensions():
817 for ename, ext in extensions():
817 doc = (gettext(ext.__doc__) or _('(no help text available)'))
818 doc = (gettext(ext.__doc__) or _('(no help text available)'))
818 if shortname:
819 if shortname:
819 ename = ename.split('.')[-1]
820 ename = ename.split('.')[-1]
820 exts[ename] = doc.splitlines()[0].strip()
821 exts[ename] = doc.splitlines()[0].strip()
821
822
822 return exts
823 return exts
823
824
824 def notloaded():
825 def notloaded():
825 '''return short names of extensions that failed to load'''
826 '''return short names of extensions that failed to load'''
826 return [name for name, mod in _extensions.iteritems() if mod is None]
827 return [name for name, mod in _extensions.iteritems() if mod is None]
827
828
828 def moduleversion(module):
829 def moduleversion(module):
829 '''return version information from given module as a string'''
830 '''return version information from given module as a string'''
830 if (util.safehasattr(module, 'getversion')
831 if (util.safehasattr(module, 'getversion')
831 and callable(module.getversion)):
832 and callable(module.getversion)):
832 version = module.getversion()
833 version = module.getversion()
833 elif util.safehasattr(module, '__version__'):
834 elif util.safehasattr(module, '__version__'):
834 version = module.__version__
835 version = module.__version__
835 else:
836 else:
836 version = ''
837 version = ''
837 if isinstance(version, (list, tuple)):
838 if isinstance(version, (list, tuple)):
838 version = '.'.join(pycompat.bytestr(o) for o in version)
839 version = '.'.join(pycompat.bytestr(o) for o in version)
839 return version
840 return version
840
841
841 def ismoduleinternal(module):
842 def ismoduleinternal(module):
842 exttestedwith = getattr(module, 'testedwith', None)
843 exttestedwith = getattr(module, 'testedwith', None)
843 return exttestedwith == "ships-with-hg-core"
844 return exttestedwith == "ships-with-hg-core"
@@ -1,1229 +1,1225 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 nullid,
19 nullid,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 bookmarks,
23 bookmarks,
24 bundlerepo,
24 bundlerepo,
25 cacheutil,
25 cacheutil,
26 cmdutil,
26 cmdutil,
27 destutil,
27 destutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 extensions,
31 extensions,
32 httppeer,
32 httppeer,
33 localrepo,
33 localrepo,
34 lock,
34 lock,
35 logcmdutil,
35 logcmdutil,
36 logexchange,
36 logexchange,
37 merge as mergemod,
37 merge as mergemod,
38 narrowspec,
38 narrowspec,
39 node,
39 node,
40 phases,
40 phases,
41 scmutil,
41 scmutil,
42 sshpeer,
42 sshpeer,
43 statichttprepo,
43 statichttprepo,
44 ui as uimod,
44 ui as uimod,
45 unionrepo,
45 unionrepo,
46 url,
46 url,
47 util,
47 util,
48 verify as verifymod,
48 verify as verifymod,
49 vfs as vfsmod,
49 vfs as vfsmod,
50 )
50 )
51
51
52 release = lock.release
52 release = lock.release
53
53
54 # shared features
54 # shared features
55 sharedbookmarks = 'bookmarks'
55 sharedbookmarks = 'bookmarks'
56
56
57 def _local(path):
57 def _local(path):
58 path = util.expandpath(util.urllocalpath(path))
58 path = util.expandpath(util.urllocalpath(path))
59 return (os.path.isfile(path) and bundlerepo or localrepo)
59 return (os.path.isfile(path) and bundlerepo or localrepo)
60
60
61 def addbranchrevs(lrepo, other, branches, revs):
61 def addbranchrevs(lrepo, other, branches, revs):
62 peer = other.peer() # a courtesy to callers using a localrepo for other
62 peer = other.peer() # a courtesy to callers using a localrepo for other
63 hashbranch, branches = branches
63 hashbranch, branches = branches
64 if not hashbranch and not branches:
64 if not hashbranch and not branches:
65 x = revs or None
65 x = revs or None
66 if revs:
66 if revs:
67 y = revs[0]
67 y = revs[0]
68 else:
68 else:
69 y = None
69 y = None
70 return x, y
70 return x, y
71 if revs:
71 if revs:
72 revs = list(revs)
72 revs = list(revs)
73 else:
73 else:
74 revs = []
74 revs = []
75
75
76 if not peer.capable('branchmap'):
76 if not peer.capable('branchmap'):
77 if branches:
77 if branches:
78 raise error.Abort(_("remote branch lookup not supported"))
78 raise error.Abort(_("remote branch lookup not supported"))
79 revs.append(hashbranch)
79 revs.append(hashbranch)
80 return revs, revs[0]
80 return revs, revs[0]
81
81
82 with peer.commandexecutor() as e:
82 with peer.commandexecutor() as e:
83 branchmap = e.callcommand('branchmap', {}).result()
83 branchmap = e.callcommand('branchmap', {}).result()
84
84
85 def primary(branch):
85 def primary(branch):
86 if branch == '.':
86 if branch == '.':
87 if not lrepo:
87 if not lrepo:
88 raise error.Abort(_("dirstate branch not accessible"))
88 raise error.Abort(_("dirstate branch not accessible"))
89 branch = lrepo.dirstate.branch()
89 branch = lrepo.dirstate.branch()
90 if branch in branchmap:
90 if branch in branchmap:
91 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
91 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
92 return True
92 return True
93 else:
93 else:
94 return False
94 return False
95
95
96 for branch in branches:
96 for branch in branches:
97 if not primary(branch):
97 if not primary(branch):
98 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
98 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
99 if hashbranch:
99 if hashbranch:
100 if not primary(hashbranch):
100 if not primary(hashbranch):
101 revs.append(hashbranch)
101 revs.append(hashbranch)
102 return revs, revs[0]
102 return revs, revs[0]
103
103
104 def parseurl(path, branches=None):
104 def parseurl(path, branches=None):
105 '''parse url#branch, returning (url, (branch, branches))'''
105 '''parse url#branch, returning (url, (branch, branches))'''
106
106
107 u = util.url(path)
107 u = util.url(path)
108 branch = None
108 branch = None
109 if u.fragment:
109 if u.fragment:
110 branch = u.fragment
110 branch = u.fragment
111 u.fragment = None
111 u.fragment = None
112 return bytes(u), (branch, branches or [])
112 return bytes(u), (branch, branches or [])
113
113
114 schemes = {
114 schemes = {
115 'bundle': bundlerepo,
115 'bundle': bundlerepo,
116 'union': unionrepo,
116 'union': unionrepo,
117 'file': _local,
117 'file': _local,
118 'http': httppeer,
118 'http': httppeer,
119 'https': httppeer,
119 'https': httppeer,
120 'ssh': sshpeer,
120 'ssh': sshpeer,
121 'static-http': statichttprepo,
121 'static-http': statichttprepo,
122 }
122 }
123
123
124 def _peerlookup(path):
124 def _peerlookup(path):
125 u = util.url(path)
125 u = util.url(path)
126 scheme = u.scheme or 'file'
126 scheme = u.scheme or 'file'
127 thing = schemes.get(scheme) or schemes['file']
127 thing = schemes.get(scheme) or schemes['file']
128 try:
128 try:
129 return thing(path)
129 return thing(path)
130 except TypeError:
130 except TypeError:
131 # we can't test callable(thing) because 'thing' can be an unloaded
131 # we can't test callable(thing) because 'thing' can be an unloaded
132 # module that implements __call__
132 # module that implements __call__
133 if not util.safehasattr(thing, 'instance'):
133 if not util.safehasattr(thing, 'instance'):
134 raise
134 raise
135 return thing
135 return thing
136
136
137 def islocal(repo):
137 def islocal(repo):
138 '''return true if repo (or path pointing to repo) is local'''
138 '''return true if repo (or path pointing to repo) is local'''
139 if isinstance(repo, bytes):
139 if isinstance(repo, bytes):
140 try:
140 try:
141 return _peerlookup(repo).islocal(repo)
141 return _peerlookup(repo).islocal(repo)
142 except AttributeError:
142 except AttributeError:
143 return False
143 return False
144 return repo.local()
144 return repo.local()
145
145
146 def openpath(ui, path):
146 def openpath(ui, path):
147 '''open path with open if local, url.open if remote'''
147 '''open path with open if local, url.open if remote'''
148 pathurl = util.url(path, parsequery=False, parsefragment=False)
148 pathurl = util.url(path, parsequery=False, parsefragment=False)
149 if pathurl.islocal():
149 if pathurl.islocal():
150 return util.posixfile(pathurl.localpath(), 'rb')
150 return util.posixfile(pathurl.localpath(), 'rb')
151 else:
151 else:
152 return url.open(ui, path)
152 return url.open(ui, path)
153
153
154 # a list of (ui, repo) functions called for wire peer initialization
154 # a list of (ui, repo) functions called for wire peer initialization
155 wirepeersetupfuncs = []
155 wirepeersetupfuncs = []
156
156
157 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
157 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
158 intents=None, createopts=None):
158 intents=None, createopts=None):
159 """return a repository object for the specified path"""
159 """return a repository object for the specified path"""
160 obj = _peerlookup(path).instance(ui, path, create, intents=intents,
160 obj = _peerlookup(path).instance(ui, path, create, intents=intents,
161 createopts=createopts)
161 createopts=createopts)
162 ui = getattr(obj, "ui", ui)
162 ui = getattr(obj, "ui", ui)
163 if ui.configbool('devel', 'debug.extensions'):
164 log = lambda msg, *values: ui.debug('debug.extensions: ',
165 msg % values, label='debug.extensions')
166 else:
167 log = lambda *a, **kw: None
168 for f in presetupfuncs or []:
163 for f in presetupfuncs or []:
169 f(ui, obj)
164 f(ui, obj)
170 log('- executing reposetup hooks\n')
165 ui.log(b'extension', b'- executing reposetup hooks\n')
171 with util.timedcm('all reposetup') as allreposetupstats:
166 with util.timedcm('all reposetup') as allreposetupstats:
172 for name, module in extensions.extensions(ui):
167 for name, module in extensions.extensions(ui):
173 log(' - running reposetup for %s\n' % (name,))
168 ui.log(b'extension', b' - running reposetup for %s\n', name)
174 hook = getattr(module, 'reposetup', None)
169 hook = getattr(module, 'reposetup', None)
175 if hook:
170 if hook:
176 with util.timedcm('reposetup %r', name) as stats:
171 with util.timedcm('reposetup %r', name) as stats:
177 hook(ui, obj)
172 hook(ui, obj)
178 log(' > reposetup for %s took %s\n', name, stats)
173 ui.log(b'extension', b' > reposetup for %s took %s\n',
179 log('> all reposetup took %s\n', allreposetupstats)
174 name, stats)
175 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
180 if not obj.local():
176 if not obj.local():
181 for f in wirepeersetupfuncs:
177 for f in wirepeersetupfuncs:
182 f(ui, obj)
178 f(ui, obj)
183 return obj
179 return obj
184
180
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    # _peerorrepo may hand back a remote peer; only local repositories are
    # acceptable here, so resolve it to its local repository object first.
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                      intents=intents, createopts=createopts)
    repo = obj.local()
    if repo:
        # hand out the 'visible' view so hidden changesets stay hidden
        return repo.filtered('visible')
    raise error.Abort(_("repository '%s' is not local") %
                      (path or obj.url()))
195
191
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    # apply remote-specific configuration overrides before connecting
    remui = remoteui(uiorrepo, opts)
    peerorrepo = _peerorrepo(remui, path, create, intents=intents,
                             createopts=createopts)
    return peerorrepo.peer()
201
197
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    # parse the source as a URL and keep only its path component
    parsed = util.url(source)
    if not parsed.path:
        return ''
    # last path component, with any trailing separator normalized away
    return os.path.basename(os.path.normpath(parsed.path))
222
218
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # store path equals repo path: this repository is not shared
        return None

    # reuse a previously resolved source repository when available
    cached = util.safehasattr(repo, 'srcrepo') and repo.srcrepo
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    srcpath = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(srcpath)
    src = repository(repo.ui, srcurl)
    # cache the result on the repo object for subsequent calls
    repo.srcrepo = src
    return src
240
236
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    ``source`` may be a local path/URL (bytes) or an existing repository
    object.  ``dest`` defaults to the basename of the source.  When
    ``bookmarks`` is True the bookmark store is shared as well.
    ``relative`` stores the share pointer as a relative path.
    Returns the newly created (shared) repository object.
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source given as a path/URL: open it and resolve any #branch part
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # source is already a repository object; nothing to check out yet
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    # repository creation performs the actual share setup via createopts
    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen to pick up the hgrc written by postshare()
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
276
272
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        # copystore() returns a lock on the destination store, released below
        destlock = copystore(ui, repo, repo.path)

        # keep the old pointer around for recovery rather than deleting it
        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # make any later use of the stale repo object fail loudly
    localrepo.poisonrepository(repo)

    return newrepo
323
319
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # inherit the default pull/push path from the source repository unless
    # the caller supplied one explicitly
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        hgrc = '[paths]\ndefault = %s\n' % default
        destrepo.vfs.write('hgrc', util.tonativeeol(hgrc))
338
334
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        # a specific revision was requested; it takes precedence
        checkout = update
    # fall back through 'default' and 'tip' until something resolves
    for candidate in (checkout, 'default', 'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
359
355
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # hardlink is None until the first util.copyfiles() call decides
        # whether hardlinking is possible; hence the progress topic starts
        # out as 'copying'
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # publishing repos have no secret changesets, so phaseroots
                # need not (and should not) be copied
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        # caller is responsible for releasing destlock
        return destlock
    except: # re-raises
        release(destlock)
        raise
397
393
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        # resolve each requested revision to a node on the remote
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
468
464
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcfile = srcrepo.vfs.join('cache/%s' % fname)
    dstfile = os.path.join(dstcachedir, fname)
    if not os.path.exists(srcfile):
        # nothing cached in the source; leave the destination alone
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srcfile, dstfile)
479
475
480 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
476 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
481 update=True, stream=False, branch=None, shareopts=None,
477 update=True, stream=False, branch=None, shareopts=None,
482 storeincludepats=None, storeexcludepats=None, depth=None):
478 storeincludepats=None, storeexcludepats=None, depth=None):
483 """Make a copy of an existing repository.
479 """Make a copy of an existing repository.
484
480
485 Create a copy of an existing repository in a new directory. The
481 Create a copy of an existing repository in a new directory. The
486 source and destination are URLs, as passed to the repository
482 source and destination are URLs, as passed to the repository
487 function. Returns a pair of repository peers, the source and
483 function. Returns a pair of repository peers, the source and
488 newly created destination.
484 newly created destination.
489
485
490 The location of the source is added to the new repository's
486 The location of the source is added to the new repository's
491 .hg/hgrc file, as the default to be used for future pulls and
487 .hg/hgrc file, as the default to be used for future pulls and
492 pushes.
488 pushes.
493
489
494 If an exception is raised, the partly cloned/updated destination
490 If an exception is raised, the partly cloned/updated destination
495 repository will be deleted.
491 repository will be deleted.
496
492
497 Arguments:
493 Arguments:
498
494
499 source: repository object or URL
495 source: repository object or URL
500
496
501 dest: URL of destination repository to create (defaults to base
497 dest: URL of destination repository to create (defaults to base
502 name of source repository)
498 name of source repository)
503
499
504 pull: always pull from source repository, even in local case or if the
500 pull: always pull from source repository, even in local case or if the
505 server prefers streaming
501 server prefers streaming
506
502
507 stream: stream raw data uncompressed from repository (fast over
503 stream: stream raw data uncompressed from repository (fast over
508 LAN, slow over WAN)
504 LAN, slow over WAN)
509
505
510 revs: revision to clone up to (implies pull=True)
506 revs: revision to clone up to (implies pull=True)
511
507
512 update: update working directory after clone completes, if
508 update: update working directory after clone completes, if
513 destination is local repository (True means update to default rev,
509 destination is local repository (True means update to default rev,
514 anything else is treated as a revision)
510 anything else is treated as a revision)
515
511
516 branch: branches to clone
512 branch: branches to clone
517
513
518 shareopts: dict of options to control auto sharing behavior. The "pool" key
514 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 activates auto sharing mode and defines the directory for stores. The
515 activates auto sharing mode and defines the directory for stores. The
520 "mode" key determines how to construct the directory name of the shared
516 "mode" key determines how to construct the directory name of the shared
521 repository. "identity" means the name is derived from the node of the first
517 repository. "identity" means the name is derived from the node of the first
522 changeset in the repository. "remote" means the name is derived from the
518 changeset in the repository. "remote" means the name is derived from the
523 remote's path/URL. Defaults to "identity."
519 remote's path/URL. Defaults to "identity."
524
520
525 storeincludepats and storeexcludepats: sets of file patterns to include and
521 storeincludepats and storeexcludepats: sets of file patterns to include and
526 exclude in the repository copy, respectively. If not defined, all files
522 exclude in the repository copy, respectively. If not defined, all files
527 will be included (a "full" clone). Otherwise a "narrow" clone containing
523 will be included (a "full" clone). Otherwise a "narrow" clone containing
528 only the requested files will be performed. If ``storeincludepats`` is not
524 only the requested files will be performed. If ``storeincludepats`` is not
529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
525 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
530 ``path:.``. If both are empty sets, no files will be cloned.
526 ``path:.``. If both are empty sets, no files will be cloned.
531 """
527 """
532
528
533 if isinstance(source, bytes):
529 if isinstance(source, bytes):
534 origsource = ui.expandpath(source)
530 origsource = ui.expandpath(source)
535 source, branches = parseurl(origsource, branch)
531 source, branches = parseurl(origsource, branch)
536 srcpeer = peer(ui, peeropts, source)
532 srcpeer = peer(ui, peeropts, source)
537 else:
533 else:
538 srcpeer = source.peer() # in case we were called with a localrepo
534 srcpeer = source.peer() # in case we were called with a localrepo
539 branches = (None, branch or [])
535 branches = (None, branch or [])
540 origsource = source = srcpeer.url()
536 origsource = source = srcpeer.url()
541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
537 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
542
538
543 if dest is None:
539 if dest is None:
544 dest = defaultdest(source)
540 dest = defaultdest(source)
545 if dest:
541 if dest:
546 ui.status(_("destination directory: %s\n") % dest)
542 ui.status(_("destination directory: %s\n") % dest)
547 else:
543 else:
548 dest = ui.expandpath(dest)
544 dest = ui.expandpath(dest)
549
545
550 dest = util.urllocalpath(dest)
546 dest = util.urllocalpath(dest)
551 source = util.urllocalpath(source)
547 source = util.urllocalpath(source)
552
548
553 if not dest:
549 if not dest:
554 raise error.Abort(_("empty destination path is not valid"))
550 raise error.Abort(_("empty destination path is not valid"))
555
551
556 destvfs = vfsmod.vfs(dest, expandpath=True)
552 destvfs = vfsmod.vfs(dest, expandpath=True)
557 if destvfs.lexists():
553 if destvfs.lexists():
558 if not destvfs.isdir():
554 if not destvfs.isdir():
559 raise error.Abort(_("destination '%s' already exists") % dest)
555 raise error.Abort(_("destination '%s' already exists") % dest)
560 elif destvfs.listdir():
556 elif destvfs.listdir():
561 raise error.Abort(_("destination '%s' is not empty") % dest)
557 raise error.Abort(_("destination '%s' is not empty") % dest)
562
558
563 createopts = {}
559 createopts = {}
564 narrow = False
560 narrow = False
565
561
566 if storeincludepats is not None:
562 if storeincludepats is not None:
567 narrowspec.validatepatterns(storeincludepats)
563 narrowspec.validatepatterns(storeincludepats)
568 narrow = True
564 narrow = True
569
565
570 if storeexcludepats is not None:
566 if storeexcludepats is not None:
571 narrowspec.validatepatterns(storeexcludepats)
567 narrowspec.validatepatterns(storeexcludepats)
572 narrow = True
568 narrow = True
573
569
574 if narrow:
570 if narrow:
575 # Include everything by default if only exclusion patterns defined.
571 # Include everything by default if only exclusion patterns defined.
576 if storeexcludepats and not storeincludepats:
572 if storeexcludepats and not storeincludepats:
577 storeincludepats = {'path:.'}
573 storeincludepats = {'path:.'}
578
574
579 createopts['narrowfiles'] = True
575 createopts['narrowfiles'] = True
580
576
581 if depth:
577 if depth:
582 createopts['shallowfilestore'] = True
578 createopts['shallowfilestore'] = True
583
579
584 if srcpeer.capable(b'lfs-serve'):
580 if srcpeer.capable(b'lfs-serve'):
585 # Repository creation honors the config if it disabled the extension, so
581 # Repository creation honors the config if it disabled the extension, so
586 # we can't just announce that lfs will be enabled. This check avoids
582 # we can't just announce that lfs will be enabled. This check avoids
587 # saying that lfs will be enabled, and then saying it's an unknown
583 # saying that lfs will be enabled, and then saying it's an unknown
588 # feature. The lfs creation option is set in either case so that a
584 # feature. The lfs creation option is set in either case so that a
589 # requirement is added. If the extension is explicitly disabled but the
585 # requirement is added. If the extension is explicitly disabled but the
590 # requirement is set, the clone aborts early, before transferring any
586 # requirement is set, the clone aborts early, before transferring any
591 # data.
587 # data.
592 createopts['lfs'] = True
588 createopts['lfs'] = True
593
589
594 if extensions.disabledext('lfs'):
590 if extensions.disabledext('lfs'):
595 ui.status(_('(remote is using large file support (lfs), but it is '
591 ui.status(_('(remote is using large file support (lfs), but it is '
596 'explicitly disabled in the local configuration)\n'))
592 'explicitly disabled in the local configuration)\n'))
597 else:
593 else:
598 ui.status(_('(remote is using large file support (lfs); lfs will '
594 ui.status(_('(remote is using large file support (lfs); lfs will '
599 'be enabled for this repository)\n'))
595 'be enabled for this repository)\n'))
600
596
601 shareopts = shareopts or {}
597 shareopts = shareopts or {}
602 sharepool = shareopts.get('pool')
598 sharepool = shareopts.get('pool')
603 sharenamemode = shareopts.get('mode')
599 sharenamemode = shareopts.get('mode')
604 if sharepool and islocal(dest):
600 if sharepool and islocal(dest):
605 sharepath = None
601 sharepath = None
606 if sharenamemode == 'identity':
602 if sharenamemode == 'identity':
607 # Resolve the name from the initial changeset in the remote
603 # Resolve the name from the initial changeset in the remote
608 # repository. This returns nullid when the remote is empty. It
604 # repository. This returns nullid when the remote is empty. It
609 # raises RepoLookupError if revision 0 is filtered or otherwise
605 # raises RepoLookupError if revision 0 is filtered or otherwise
610 # not available. If we fail to resolve, sharing is not enabled.
606 # not available. If we fail to resolve, sharing is not enabled.
611 try:
607 try:
612 with srcpeer.commandexecutor() as e:
608 with srcpeer.commandexecutor() as e:
613 rootnode = e.callcommand('lookup', {
609 rootnode = e.callcommand('lookup', {
614 'key': '0',
610 'key': '0',
615 }).result()
611 }).result()
616
612
617 if rootnode != node.nullid:
613 if rootnode != node.nullid:
618 sharepath = os.path.join(sharepool, node.hex(rootnode))
614 sharepath = os.path.join(sharepool, node.hex(rootnode))
619 else:
615 else:
620 ui.status(_('(not using pooled storage: '
616 ui.status(_('(not using pooled storage: '
621 'remote appears to be empty)\n'))
617 'remote appears to be empty)\n'))
622 except error.RepoLookupError:
618 except error.RepoLookupError:
623 ui.status(_('(not using pooled storage: '
619 ui.status(_('(not using pooled storage: '
624 'unable to resolve identity of remote)\n'))
620 'unable to resolve identity of remote)\n'))
625 elif sharenamemode == 'remote':
621 elif sharenamemode == 'remote':
626 sharepath = os.path.join(
622 sharepath = os.path.join(
627 sharepool, node.hex(hashlib.sha1(source).digest()))
623 sharepool, node.hex(hashlib.sha1(source).digest()))
628 else:
624 else:
629 raise error.Abort(_('unknown share naming mode: %s') %
625 raise error.Abort(_('unknown share naming mode: %s') %
630 sharenamemode)
626 sharenamemode)
631
627
632 # TODO this is a somewhat arbitrary restriction.
628 # TODO this is a somewhat arbitrary restriction.
633 if narrow:
629 if narrow:
634 ui.status(_('(pooled storage not supported for narrow clones)\n'))
630 ui.status(_('(pooled storage not supported for narrow clones)\n'))
635 sharepath = None
631 sharepath = None
636
632
637 if sharepath:
633 if sharepath:
638 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
634 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
639 dest, pull=pull, rev=revs, update=update,
635 dest, pull=pull, rev=revs, update=update,
640 stream=stream)
636 stream=stream)
641
637
642 srclock = destlock = cleandir = None
638 srclock = destlock = cleandir = None
643 srcrepo = srcpeer.local()
639 srcrepo = srcpeer.local()
644 try:
640 try:
645 abspath = origsource
641 abspath = origsource
646 if islocal(origsource):
642 if islocal(origsource):
647 abspath = os.path.abspath(util.urllocalpath(origsource))
643 abspath = os.path.abspath(util.urllocalpath(origsource))
648
644
649 if islocal(dest):
645 if islocal(dest):
650 cleandir = dest
646 cleandir = dest
651
647
652 copy = False
648 copy = False
653 if (srcrepo and srcrepo.cancopy() and islocal(dest)
649 if (srcrepo and srcrepo.cancopy() and islocal(dest)
654 and not phases.hassecret(srcrepo)):
650 and not phases.hassecret(srcrepo)):
655 copy = not pull and not revs
651 copy = not pull and not revs
656
652
657 # TODO this is a somewhat arbitrary restriction.
653 # TODO this is a somewhat arbitrary restriction.
658 if narrow:
654 if narrow:
659 copy = False
655 copy = False
660
656
661 if copy:
657 if copy:
662 try:
658 try:
663 # we use a lock here because if we race with commit, we
659 # we use a lock here because if we race with commit, we
664 # can end up with extra data in the cloned revlogs that's
660 # can end up with extra data in the cloned revlogs that's
665 # not pointed to by changesets, thus causing verify to
661 # not pointed to by changesets, thus causing verify to
666 # fail
662 # fail
667 srclock = srcrepo.lock(wait=False)
663 srclock = srcrepo.lock(wait=False)
668 except error.LockError:
664 except error.LockError:
669 copy = False
665 copy = False
670
666
671 if copy:
667 if copy:
672 srcrepo.hook('preoutgoing', throw=True, source='clone')
668 srcrepo.hook('preoutgoing', throw=True, source='clone')
673 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
669 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
674 if not os.path.exists(dest):
670 if not os.path.exists(dest):
675 util.makedirs(dest)
671 util.makedirs(dest)
676 else:
672 else:
677 # only clean up directories we create ourselves
673 # only clean up directories we create ourselves
678 cleandir = hgdir
674 cleandir = hgdir
679 try:
675 try:
680 destpath = hgdir
676 destpath = hgdir
681 util.makedir(destpath, notindexed=True)
677 util.makedir(destpath, notindexed=True)
682 except OSError as inst:
678 except OSError as inst:
683 if inst.errno == errno.EEXIST:
679 if inst.errno == errno.EEXIST:
684 cleandir = None
680 cleandir = None
685 raise error.Abort(_("destination '%s' already exists")
681 raise error.Abort(_("destination '%s' already exists")
686 % dest)
682 % dest)
687 raise
683 raise
688
684
689 destlock = copystore(ui, srcrepo, destpath)
685 destlock = copystore(ui, srcrepo, destpath)
690 # copy bookmarks over
686 # copy bookmarks over
691 srcbookmarks = srcrepo.vfs.join('bookmarks')
687 srcbookmarks = srcrepo.vfs.join('bookmarks')
692 dstbookmarks = os.path.join(destpath, 'bookmarks')
688 dstbookmarks = os.path.join(destpath, 'bookmarks')
693 if os.path.exists(srcbookmarks):
689 if os.path.exists(srcbookmarks):
694 util.copyfile(srcbookmarks, dstbookmarks)
690 util.copyfile(srcbookmarks, dstbookmarks)
695
691
696 dstcachedir = os.path.join(destpath, 'cache')
692 dstcachedir = os.path.join(destpath, 'cache')
697 for cache in cacheutil.cachetocopy(srcrepo):
693 for cache in cacheutil.cachetocopy(srcrepo):
698 _copycache(srcrepo, dstcachedir, cache)
694 _copycache(srcrepo, dstcachedir, cache)
699
695
700 # we need to re-init the repo after manually copying the data
696 # we need to re-init the repo after manually copying the data
701 # into it
697 # into it
702 destpeer = peer(srcrepo, peeropts, dest)
698 destpeer = peer(srcrepo, peeropts, dest)
703 srcrepo.hook('outgoing', source='clone',
699 srcrepo.hook('outgoing', source='clone',
704 node=node.hex(node.nullid))
700 node=node.hex(node.nullid))
705 else:
701 else:
706 try:
702 try:
707 # only pass ui when no srcrepo
703 # only pass ui when no srcrepo
708 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
704 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
709 createopts=createopts)
705 createopts=createopts)
710 except OSError as inst:
706 except OSError as inst:
711 if inst.errno == errno.EEXIST:
707 if inst.errno == errno.EEXIST:
712 cleandir = None
708 cleandir = None
713 raise error.Abort(_("destination '%s' already exists")
709 raise error.Abort(_("destination '%s' already exists")
714 % dest)
710 % dest)
715 raise
711 raise
716
712
717 if revs:
713 if revs:
718 if not srcpeer.capable('lookup'):
714 if not srcpeer.capable('lookup'):
719 raise error.Abort(_("src repository does not support "
715 raise error.Abort(_("src repository does not support "
720 "revision lookup and so doesn't "
716 "revision lookup and so doesn't "
721 "support clone by revision"))
717 "support clone by revision"))
722
718
723 # TODO this is batchable.
719 # TODO this is batchable.
724 remoterevs = []
720 remoterevs = []
725 for rev in revs:
721 for rev in revs:
726 with srcpeer.commandexecutor() as e:
722 with srcpeer.commandexecutor() as e:
727 remoterevs.append(e.callcommand('lookup', {
723 remoterevs.append(e.callcommand('lookup', {
728 'key': rev,
724 'key': rev,
729 }).result())
725 }).result())
730 revs = remoterevs
726 revs = remoterevs
731
727
732 checkout = revs[0]
728 checkout = revs[0]
733 else:
729 else:
734 revs = None
730 revs = None
735 local = destpeer.local()
731 local = destpeer.local()
736 if local:
732 if local:
737 if narrow:
733 if narrow:
738 with local.lock():
734 with local.lock():
739 local.setnarrowpats(storeincludepats, storeexcludepats)
735 local.setnarrowpats(storeincludepats, storeexcludepats)
740
736
741 u = util.url(abspath)
737 u = util.url(abspath)
742 defaulturl = bytes(u)
738 defaulturl = bytes(u)
743 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
739 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
744 if not stream:
740 if not stream:
745 if pull:
741 if pull:
746 stream = False
742 stream = False
747 else:
743 else:
748 stream = None
744 stream = None
749 # internal config: ui.quietbookmarkmove
745 # internal config: ui.quietbookmarkmove
750 overrides = {('ui', 'quietbookmarkmove'): True}
746 overrides = {('ui', 'quietbookmarkmove'): True}
751 with local.ui.configoverride(overrides, 'clone'):
747 with local.ui.configoverride(overrides, 'clone'):
752 exchange.pull(local, srcpeer, revs,
748 exchange.pull(local, srcpeer, revs,
753 streamclonerequested=stream,
749 streamclonerequested=stream,
754 includepats=storeincludepats,
750 includepats=storeincludepats,
755 excludepats=storeexcludepats,
751 excludepats=storeexcludepats,
756 depth=depth)
752 depth=depth)
757 elif srcrepo:
753 elif srcrepo:
758 # TODO lift restriction once exchange.push() accepts narrow
754 # TODO lift restriction once exchange.push() accepts narrow
759 # push.
755 # push.
760 if narrow:
756 if narrow:
761 raise error.Abort(_('narrow clone not available for '
757 raise error.Abort(_('narrow clone not available for '
762 'remote destinations'))
758 'remote destinations'))
763
759
764 exchange.push(srcrepo, destpeer, revs=revs,
760 exchange.push(srcrepo, destpeer, revs=revs,
765 bookmarks=srcrepo._bookmarks.keys())
761 bookmarks=srcrepo._bookmarks.keys())
766 else:
762 else:
767 raise error.Abort(_("clone from remote to remote not supported")
763 raise error.Abort(_("clone from remote to remote not supported")
768 )
764 )
769
765
770 cleandir = None
766 cleandir = None
771
767
772 destrepo = destpeer.local()
768 destrepo = destpeer.local()
773 if destrepo:
769 if destrepo:
774 template = uimod.samplehgrcs['cloned']
770 template = uimod.samplehgrcs['cloned']
775 u = util.url(abspath)
771 u = util.url(abspath)
776 u.passwd = None
772 u.passwd = None
777 defaulturl = bytes(u)
773 defaulturl = bytes(u)
778 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
774 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
779 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
775 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
780
776
781 if ui.configbool('experimental', 'remotenames'):
777 if ui.configbool('experimental', 'remotenames'):
782 logexchange.pullremotenames(destrepo, srcpeer)
778 logexchange.pullremotenames(destrepo, srcpeer)
783
779
784 if update:
780 if update:
785 if update is not True:
781 if update is not True:
786 with srcpeer.commandexecutor() as e:
782 with srcpeer.commandexecutor() as e:
787 checkout = e.callcommand('lookup', {
783 checkout = e.callcommand('lookup', {
788 'key': update,
784 'key': update,
789 }).result()
785 }).result()
790
786
791 uprev = None
787 uprev = None
792 status = None
788 status = None
793 if checkout is not None:
789 if checkout is not None:
794 # Some extensions (at least hg-git and hg-subversion) have
790 # Some extensions (at least hg-git and hg-subversion) have
795 # a peer.lookup() implementation that returns a name instead
791 # a peer.lookup() implementation that returns a name instead
796 # of a nodeid. We work around it here until we've figured
792 # of a nodeid. We work around it here until we've figured
797 # out a better solution.
793 # out a better solution.
798 if len(checkout) == 20 and checkout in destrepo:
794 if len(checkout) == 20 and checkout in destrepo:
799 uprev = checkout
795 uprev = checkout
800 elif scmutil.isrevsymbol(destrepo, checkout):
796 elif scmutil.isrevsymbol(destrepo, checkout):
801 uprev = scmutil.revsymbol(destrepo, checkout).node()
797 uprev = scmutil.revsymbol(destrepo, checkout).node()
802 else:
798 else:
803 if update is not True:
799 if update is not True:
804 try:
800 try:
805 uprev = destrepo.lookup(update)
801 uprev = destrepo.lookup(update)
806 except error.RepoLookupError:
802 except error.RepoLookupError:
807 pass
803 pass
808 if uprev is None:
804 if uprev is None:
809 try:
805 try:
810 uprev = destrepo._bookmarks['@']
806 uprev = destrepo._bookmarks['@']
811 update = '@'
807 update = '@'
812 bn = destrepo[uprev].branch()
808 bn = destrepo[uprev].branch()
813 if bn == 'default':
809 if bn == 'default':
814 status = _("updating to bookmark @\n")
810 status = _("updating to bookmark @\n")
815 else:
811 else:
816 status = (_("updating to bookmark @ on branch %s\n")
812 status = (_("updating to bookmark @ on branch %s\n")
817 % bn)
813 % bn)
818 except KeyError:
814 except KeyError:
819 try:
815 try:
820 uprev = destrepo.branchtip('default')
816 uprev = destrepo.branchtip('default')
821 except error.RepoLookupError:
817 except error.RepoLookupError:
822 uprev = destrepo.lookup('tip')
818 uprev = destrepo.lookup('tip')
823 if not status:
819 if not status:
824 bn = destrepo[uprev].branch()
820 bn = destrepo[uprev].branch()
825 status = _("updating to branch %s\n") % bn
821 status = _("updating to branch %s\n") % bn
826 destrepo.ui.status(status)
822 destrepo.ui.status(status)
827 _update(destrepo, uprev)
823 _update(destrepo, uprev)
828 if update in destrepo._bookmarks:
824 if update in destrepo._bookmarks:
829 bookmarks.activate(destrepo, update)
825 bookmarks.activate(destrepo, update)
830 finally:
826 finally:
831 release(srclock, destlock)
827 release(srclock, destlock)
832 if cleandir is not None:
828 if cleandir is not None:
833 shutil.rmtree(cleandir, True)
829 shutil.rmtree(cleandir, True)
834 if srcpeer is not None:
830 if srcpeer is not None:
835 srcpeer.close()
831 srcpeer.close()
836 return srcpeer, destpeer
832 return srcpeer, destpeer
837
833
def _showstats(repo, stats, quietempty=False):
    """Print the standard one-line update/merge summary on repo.ui.

    ``stats`` is a merge result object exposing ``updatedcount``,
    ``mergedcount``, ``removedcount`` and ``unresolvedcount``.  When
    ``quietempty`` is true and the stats report no work done, nothing
    is printed at all.
    """
    if quietempty and stats.isempty():
        return
    counts = (stats.updatedcount, stats.mergedcount,
              stats.removedcount, stats.unresolvedcount)
    summary = _("%d files updated, %d files merged, "
                "%d files removed, %d files unresolved\n")
    repo.ui.status(summary % counts)
845
841
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to ``node``.

    With ``overwrite`` set, local changes are clobbered; otherwise they
    are merged into the destination.  Returns the stats object produced
    by the merge machinery (see pydoc mercurial.merge.applyupdates).
    """
    # Labels tag the two sides in conflict markers.
    sidelabels = ['working copy', 'destination']
    return mergemod.update(repo, node,
                           branchmerge=False,
                           force=overwrite,
                           labels=sidelabels,
                           updatecheck=updatecheck)
855
851
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node`` (non-destructive).

    Prints the usual stats summary and, if file merges were left
    unresolved, a hint to run 'hg resolve'.  Returns True when any
    unresolved conflicts remain.
    """
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    hasconflicts = stats.unresolvedcount > 0
    if hasconflicts:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return hasconflicts
863
859
# naming conflict in clone(): clone() takes an `update` argument that
# shadows this function, so it calls the module-level alias instead.
_update = update
866
862
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Also discards any interrupted-graft state.  Returns True when
    unresolved conflicts remain (which a forced update normally avoids).
    """
    stats = updaterepo(repo, node, True)
    # A clean update invalidates any in-progress graft.
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0
874
870
# naming conflict in updatetotally(): its `clean` boolean argument shadows
# this function, so it calls the module-level alias instead.
_clean = clean
877
873
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        # Fall back to the user's configured policy, then to 'linear'
        # when unset or set to an unknown value.
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # No explicit revision: let destutil pick the destination;
            # it may also name a bookmark to move (movemarkfrom).
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                # Bail out now on a dirty working directory, then run the
                # actual update with checking disabled (already verified).
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # Update succeeded: try to advance the active bookmark.
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # The requested name is a bookmark: make it active.
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # A non-bookmark name was requested: leave any active bookmark.
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            # Destination was auto-chosen: mention other plausible ones.
            destutil.statusotherdests(ui, repo)

    return ret
949
945
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, branchmerge=True, force=force,
                                mergeforce=mergeforce, labels=labels)
    else:
        # --abort: throw away the in-progress merge and go back to the
        # pre-merge parent.
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conficts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        # Forced non-merge update back to the local side clobbers the
        # partially-merged working directory.
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
978
974
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        # Resolve symbolic revisions on the remote side.
        revs = [other.lookup(rev) for rev in revs]
    # getremotechanges may replace `other` with a bundlerepo wrapping the
    # fetched changes; cleanupfn must run even if display fails.
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                                           revs,
                                                           opts["bundle"],
                                                           opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
1009
1005
def incoming(ui, repo, source, opts):
    """Show changesets in ``source`` not present locally.

    Returns 0 when incoming changes were found, 1 otherwise (possibly
    lowered by subrepo results when --subrepos is given).
    """
    def subreporecurse():
        # Recurse into subrepositories when requested; keep the best
        # (lowest) exit code seen.
        code = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                subrepo = wctx.sub(subpath)
                code = min(code, subrepo.incoming(ui, source, opts))
        return code

    def display(other, chlist, displayer):
        # Show at most --limit changesets, optionally newest first and
        # optionally skipping merge changesets.
        maxcount = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        skipmerges = opts.get('no_merges')
        shown = 0
        for n in chlist:
            if maxcount is not None and shown >= maxcount:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if skipmerges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1034
1030
def _outgoing(ui, repo, dest, opts):
    """Find changesets missing from ``dest``.

    Returns a ``(missing, other)`` pair: the list of outgoing changeset
    nodes and the destination peer.  Aborts when no destination path is
    configured.
    """
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, _checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
1055
1051
def outgoing(ui, repo, dest, opts):
    """Show changesets not present in ``dest``.

    Returns 0 when outgoing changes were found, 1 otherwise (possibly
    lowered by subrepo results when --subrepos is given).
    """
    def recurse():
        # Recurse into subrepositories when requested; keep the best
        # (lowest) exit code seen.
        code = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for subpath in sorted(wctx.substate):
                subrepo = wctx.sub(subpath)
                code = min(code, subrepo.outgoing(ui, dest, opts))
        return code

    maxcount = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    skipmerges = opts.get('no_merges')
    shown = 0
    for n in o:
        if maxcount is not None and shown >= maxcount:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if skipmerges and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
1089
1085
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # verify() on each subrepo; any failure makes the
                        # overall result non-zero, but we keep going.
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                # Iterating ctx.substate itself blew up: the .hgsubstate
                # file in this revision could not be parsed.
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
1118
1114
def remoteui(src, opts):
    """Build a ui suitable for remote operations from a ui or repo.

    Starts from the global (non-repo) configuration and copies over only
    the settings that make sense to apply to a remote peer: ssh command
    options, bundle root, and authentication/certificate sections.
    """
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for opt in ('ssh', 'remotecmd'):
        val = opts.get(opt) or src.config('ui', opt)
        if val:
            dst.setconfig("ui", opt, val, 'copied')

    # copy bundle-specific options
    reporoot = src.config('bundle', 'mainreporoot')
    if reporoot:
        dst.setconfig('bundle', 'mainreporoot', reporoot, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
1147
1143
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.
# Each entry is a (vfs-attribute-name, relative-filename) pair; presumably
# the attribute names 'spath'/'path' are resolved against the repo to pick
# the store vfs or the .hg vfs — TODO confirm against the consumer.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
1156
1152
1157 class cachedlocalrepo(object):
1153 class cachedlocalrepo(object):
1158 """Holds a localrepository that can be cached and reused."""
1154 """Holds a localrepository that can be cached and reused."""
1159
1155
1160 def __init__(self, repo):
1156 def __init__(self, repo):
1161 """Create a new cached repo from an existing repo.
1157 """Create a new cached repo from an existing repo.
1162
1158
1163 We assume the passed in repo was recently created. If the
1159 We assume the passed in repo was recently created. If the
1164 repo has changed between when it was created and when it was
1160 repo has changed between when it was created and when it was
1165 turned into a cache, it may not refresh properly.
1161 turned into a cache, it may not refresh properly.
1166 """
1162 """
1167 assert isinstance(repo, localrepo.localrepository)
1163 assert isinstance(repo, localrepo.localrepository)
1168 self._repo = repo
1164 self._repo = repo
1169 self._state, self.mtime = self._repostate()
1165 self._state, self.mtime = self._repostate()
1170 self._filtername = repo.filtername
1166 self._filtername = repo.filtername
1171
1167
1172 def fetch(self):
1168 def fetch(self):
1173 """Refresh (if necessary) and return a repository.
1169 """Refresh (if necessary) and return a repository.
1174
1170
1175 If the cached instance is out of date, it will be recreated
1171 If the cached instance is out of date, it will be recreated
1176 automatically and returned.
1172 automatically and returned.
1177
1173
1178 Returns a tuple of the repo and a boolean indicating whether a new
1174 Returns a tuple of the repo and a boolean indicating whether a new
1179 repo instance was created.
1175 repo instance was created.
1180 """
1176 """
1181 # We compare the mtimes and sizes of some well-known files to
1177 # We compare the mtimes and sizes of some well-known files to
1182 # determine if the repo changed. This is not precise, as mtimes
1178 # determine if the repo changed. This is not precise, as mtimes
1183 # are susceptible to clock skew and imprecise filesystems and
1179 # are susceptible to clock skew and imprecise filesystems and
1184 # file content can change while maintaining the same size.
1180 # file content can change while maintaining the same size.
1185
1181
1186 state, mtime = self._repostate()
1182 state, mtime = self._repostate()
1187 if state == self._state:
1183 if state == self._state:
1188 return self._repo, False
1184 return self._repo, False
1189
1185
1190 repo = repository(self._repo.baseui, self._repo.url())
1186 repo = repository(self._repo.baseui, self._repo.url())
1191 if self._filtername:
1187 if self._filtername:
1192 self._repo = repo.filtered(self._filtername)
1188 self._repo = repo.filtered(self._filtername)
1193 else:
1189 else:
1194 self._repo = repo.unfiltered()
1190 self._repo = repo.unfiltered()
1195 self._state = state
1191 self._state = state
1196 self.mtime = mtime
1192 self.mtime = mtime
1197
1193
1198 return self._repo, True
1194 return self._repo, True
1199
1195
1200 def _repostate(self):
1196 def _repostate(self):
1201 state = []
1197 state = []
1202 maxmtime = -1
1198 maxmtime = -1
1203 for attr, fname in foi:
1199 for attr, fname in foi:
1204 prefix = getattr(self._repo, attr)
1200 prefix = getattr(self._repo, attr)
1205 p = os.path.join(prefix, fname)
1201 p = os.path.join(prefix, fname)
1206 try:
1202 try:
1207 st = os.stat(p)
1203 st = os.stat(p)
1208 except OSError:
1204 except OSError:
1209 st = os.stat(prefix)
1205 st = os.stat(prefix)
1210 state.append((st[stat.ST_MTIME], st.st_size))
1206 state.append((st[stat.ST_MTIME], st.st_size))
1211 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1207 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1212
1208
1213 return tuple(state), maxmtime
1209 return tuple(state), maxmtime
1214
1210
1215 def copy(self):
1211 def copy(self):
1216 """Obtain a copy of this class instance.
1212 """Obtain a copy of this class instance.
1217
1213
1218 A new localrepository instance is obtained. The new instance should be
1214 A new localrepository instance is obtained. The new instance should be
1219 completely independent of the original.
1215 completely independent of the original.
1220 """
1216 """
1221 repo = repository(self._repo.baseui, self._repo.origroot)
1217 repo = repository(self._repo.baseui, self._repo.origroot)
1222 if self._filtername:
1218 if self._filtername:
1223 repo = repo.filtered(self._filtername)
1219 repo = repo.filtered(self._filtername)
1224 else:
1220 else:
1225 repo = repo.unfiltered()
1221 repo = repo.unfiltered()
1226 c = cachedlocalrepo(repo)
1222 c = cachedlocalrepo(repo)
1227 c._state = self._state
1223 c._state = self._state
1228 c.mtime = self.mtime
1224 c.mtime = self.mtime
1229 return c
1225 return c
@@ -1,151 +1,156 b''
1 $ filterlog () {
2 > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!'
3 > }
4
1 ensure that failing ui.atexit handlers report sensibly
5 ensure that failing ui.atexit handlers report sensibly
2
6
3 $ cat > $TESTTMP/bailatexit.py <<EOF
7 $ cat > $TESTTMP/bailatexit.py <<EOF
4 > from mercurial import util
8 > from mercurial import util
5 > def bail():
9 > def bail():
6 > raise RuntimeError('ui.atexit handler exception')
10 > raise RuntimeError('ui.atexit handler exception')
7 >
11 >
8 > def extsetup(ui):
12 > def extsetup(ui):
9 > ui.atexit(bail)
13 > ui.atexit(bail)
10 > EOF
14 > EOF
11 $ hg -q --config extensions.bailatexit=$TESTTMP/bailatexit.py \
15 $ hg -q --config extensions.bailatexit=$TESTTMP/bailatexit.py \
12 > help help
16 > help help
13 hg help [-eck] [-s PLATFORM] [TOPIC]
17 hg help [-eck] [-s PLATFORM] [TOPIC]
14
18
15 show help for a given topic or a help overview
19 show help for a given topic or a help overview
16 error in exit handlers:
20 error in exit handlers:
17 Traceback (most recent call last):
21 Traceback (most recent call last):
18 File "*/mercurial/dispatch.py", line *, in _runexithandlers (glob)
22 File "*/mercurial/dispatch.py", line *, in _runexithandlers (glob)
19 func(*args, **kwargs)
23 func(*args, **kwargs)
20 File "$TESTTMP/bailatexit.py", line *, in bail (glob)
24 File "$TESTTMP/bailatexit.py", line *, in bail (glob)
21 raise RuntimeError('ui.atexit handler exception')
25 raise RuntimeError('ui.atexit handler exception')
22 RuntimeError: ui.atexit handler exception
26 RuntimeError: ui.atexit handler exception
23 [255]
27 [255]
24
28
25 $ rm $TESTTMP/bailatexit.py
29 $ rm $TESTTMP/bailatexit.py
26
30
27 another bad extension
31 another bad extension
28
32
29 $ echo 'raise Exception("bit bucket overflow")' > badext.py
33 $ echo 'raise Exception("bit bucket overflow")' > badext.py
30 $ abspathexc=`pwd`/badext.py
34 $ abspathexc=`pwd`/badext.py
31
35
32 $ cat >baddocext.py <<EOF
36 $ cat >baddocext.py <<EOF
33 > """
37 > """
34 > baddocext is bad
38 > baddocext is bad
35 > """
39 > """
36 > EOF
40 > EOF
37 $ abspathdoc=`pwd`/baddocext.py
41 $ abspathdoc=`pwd`/baddocext.py
38
42
39 $ cat <<EOF >> $HGRCPATH
43 $ cat <<EOF >> $HGRCPATH
40 > [extensions]
44 > [extensions]
41 > gpg =
45 > gpg =
42 > hgext.gpg =
46 > hgext.gpg =
43 > badext = $abspathexc
47 > badext = $abspathexc
44 > baddocext = $abspathdoc
48 > baddocext = $abspathdoc
45 > badext2 =
49 > badext2 =
46 > EOF
50 > EOF
47
51
48 $ hg -q help help 2>&1 |grep extension
52 $ hg -q help help 2>&1 |grep extension
49 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
53 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
50 *** failed to import extension badext2: No module named *badext2* (glob)
54 *** failed to import extension badext2: No module named *badext2* (glob)
51
55
52 show traceback
56 show traceback
53
57
54 $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError|ModuleNotFound'
58 $ hg -q help help --traceback 2>&1 | egrep ' extension|^Exception|Traceback|ImportError|ModuleNotFound'
55 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
59 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
56 Traceback (most recent call last):
60 Traceback (most recent call last):
57 Exception: bit bucket overflow
61 Exception: bit bucket overflow
58 *** failed to import extension badext2: No module named *badext2* (glob)
62 *** failed to import extension badext2: No module named *badext2* (glob)
59 Traceback (most recent call last):
63 Traceback (most recent call last):
60 ImportError: No module named badext2 (no-py3 !)
64 ImportError: No module named badext2 (no-py3 !)
61 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
65 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
62 Traceback (most recent call last): (py3 !)
66 Traceback (most recent call last): (py3 !)
63 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
67 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
64 Traceback (most recent call last): (py3 !)
68 Traceback (most recent call last): (py3 !)
65 ModuleNotFoundError: No module named 'badext2' (py3 !)
69 ModuleNotFoundError: No module named 'badext2' (py3 !)
66
70
67 names of extensions failed to load can be accessed via extensions.notloaded()
71 names of extensions failed to load can be accessed via extensions.notloaded()
68
72
69 $ cat <<EOF > showbadexts.py
73 $ cat <<EOF > showbadexts.py
70 > from mercurial import commands, extensions, registrar
74 > from mercurial import commands, extensions, registrar
71 > cmdtable = {}
75 > cmdtable = {}
72 > command = registrar.command(cmdtable)
76 > command = registrar.command(cmdtable)
73 > @command(b'showbadexts', norepo=True)
77 > @command(b'showbadexts', norepo=True)
74 > def showbadexts(ui, *pats, **opts):
78 > def showbadexts(ui, *pats, **opts):
75 > ui.write(b'BADEXTS: %s\n' % b' '.join(sorted(extensions.notloaded())))
79 > ui.write(b'BADEXTS: %s\n' % b' '.join(sorted(extensions.notloaded())))
76 > EOF
80 > EOF
77 $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
81 $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS'
78 BADEXTS: badext badext2
82 BADEXTS: badext badext2
79
83
80 #if no-extraextensions
84 #if no-extraextensions
81 show traceback for ImportError of hgext.name if devel.debug.extensions is set
85 show traceback for ImportError of hgext.name if devel.debug.extensions is set
82
86
83 $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
87 $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
84 > | grep -v '^ ' \
88 > | grep -v '^ ' \
85 > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import|ModuleNotFound'
89 > | filterlog \
86 debug.extensions: loading extensions
90 > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|^YYYY|not import|ModuleNotFound'
87 debug.extensions: - processing 5 entries
91 YYYY/MM/DD HH:MM:SS (PID)> loading extensions
88 debug.extensions: - loading extension: gpg
92 YYYY/MM/DD HH:MM:SS (PID)> - processing 5 entries
89 debug.extensions: > gpg extension loaded in * (glob)
93 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: gpg
90 debug.extensions: - validating extension tables: gpg
94 YYYY/MM/DD HH:MM:SS (PID)> > gpg extension loaded in * (glob)
91 debug.extensions: - invoking registered callbacks: gpg
95 YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: gpg
92 debug.extensions: > callbacks completed in * (glob)
96 YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: gpg
93 debug.extensions: - loading extension: badext
97 YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
98 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext
94 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
99 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
95 Traceback (most recent call last):
100 Traceback (most recent call last):
96 Exception: bit bucket overflow
101 Exception: bit bucket overflow
97 debug.extensions: - loading extension: baddocext
102 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: baddocext
98 debug.extensions: > baddocext extension loaded in * (glob)
103 YYYY/MM/DD HH:MM:SS (PID)> > baddocext extension loaded in * (glob)
99 debug.extensions: - validating extension tables: baddocext
104 YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: baddocext
100 debug.extensions: - invoking registered callbacks: baddocext
105 YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: baddocext
101 debug.extensions: > callbacks completed in * (glob)
106 YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
102 debug.extensions: - loading extension: badext2
107 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext2
103 debug.extensions: - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
108 YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
104 Traceback (most recent call last):
109 Traceback (most recent call last):
105 ImportError: No module named badext2 (no-py3 !)
110 ImportError: No module named badext2 (no-py3 !)
106 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
111 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
107 debug.extensions: - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
112 YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
108 Traceback (most recent call last):
113 Traceback (most recent call last):
109 ImportError: No module named badext2 (no-py3 !)
114 ImportError: No module named badext2 (no-py3 !)
110 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
115 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
111 Traceback (most recent call last): (py3 !)
116 Traceback (most recent call last): (py3 !)
112 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
117 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
113 *** failed to import extension badext2: No module named *badext2* (glob)
118 *** failed to import extension badext2: No module named *badext2* (glob)
114 Traceback (most recent call last):
119 Traceback (most recent call last):
115 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
120 ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
116 Traceback (most recent call last): (py3 !)
121 Traceback (most recent call last): (py3 !)
117 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
122 ModuleNotFoundError: No module named 'hgext3rd.badext2' (py3 !)
118 Traceback (most recent call last): (py3 !)
123 Traceback (most recent call last): (py3 !)
119 ModuleNotFoundError: No module named 'badext2' (py3 !)
124 ModuleNotFoundError: No module named 'badext2' (py3 !)
120 ImportError: No module named badext2 (no-py3 !)
125 ImportError: No module named badext2 (no-py3 !)
121 debug.extensions: > loaded 2 extensions, total time * (glob)
126 YYYY/MM/DD HH:MM:SS (PID)> > loaded 2 extensions, total time * (glob)
122 debug.extensions: - loading configtable attributes
127 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
123 debug.extensions: - executing uisetup hooks
128 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
124 debug.extensions: - running uisetup for gpg
129 YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for gpg
125 debug.extensions: > uisetup for gpg took * (glob)
130 YYYY/MM/DD HH:MM:SS (PID)> > uisetup for gpg took * (glob)
126 debug.extensions: - running uisetup for baddocext
131 YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for baddocext
127 debug.extensions: > uisetup for baddocext took * (glob)
132 YYYY/MM/DD HH:MM:SS (PID)> > uisetup for baddocext took * (glob)
128 debug.extensions: > all uisetup took * (glob)
133 YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
129 debug.extensions: - executing extsetup hooks
134 YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
130 debug.extensions: - running extsetup for gpg
135 YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for gpg
131 debug.extensions: > extsetup for gpg took * (glob)
136 YYYY/MM/DD HH:MM:SS (PID)> > extsetup for gpg took * (glob)
132 debug.extensions: - running extsetup for baddocext
137 YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for baddocext
133 debug.extensions: > extsetup for baddocext took * (glob)
138 YYYY/MM/DD HH:MM:SS (PID)> > extsetup for baddocext took * (glob)
134 debug.extensions: > all extsetup took * (glob)
139 YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
135 debug.extensions: - executing remaining aftercallbacks
140 YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
136 debug.extensions: > remaining aftercallbacks completed in * (glob)
141 YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
137 debug.extensions: - loading extension registration objects
142 YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
138 debug.extensions: > extension registration object loading took * (glob)
143 YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
139 debug.extensions: > extension baddocext take a total of * to load (glob)
144 YYYY/MM/DD HH:MM:SS (PID)> > extension baddocext take a total of * to load (glob)
140 debug.extensions: > extension gpg take a total of * to load (glob)
145 YYYY/MM/DD HH:MM:SS (PID)> > extension gpg take a total of * to load (glob)
141 debug.extensions: extension loading complete
146 YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
142 #endif
147 #endif
143
148
144 confirm that there's no crash when an extension's documentation is bad
149 confirm that there's no crash when an extension's documentation is bad
145
150
146 $ hg help --keyword baddocext
151 $ hg help --keyword baddocext
147 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
152 *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
148 *** failed to import extension badext2: No module named *badext2* (glob)
153 *** failed to import extension badext2: No module named *badext2* (glob)
149 Topics:
154 Topics:
150
155
151 extensions Using Additional Features
156 extensions Using Additional Features
@@ -1,96 +1,100 b''
1 Test basic extension support
1 Test basic extension support
2
2
3 $ cat > foobar.py <<EOF
3 $ cat > foobar.py <<EOF
4 > import os
4 > import os
5 > from mercurial import commands, registrar
5 > from mercurial import commands, registrar
6 > cmdtable = {}
6 > cmdtable = {}
7 > command = registrar.command(cmdtable)
7 > command = registrar.command(cmdtable)
8 > configtable = {}
8 > configtable = {}
9 > configitem = registrar.configitem(configtable)
9 > configitem = registrar.configitem(configtable)
10 > configitem(b'tests', b'foo', default=b"Foo")
10 > configitem(b'tests', b'foo', default=b"Foo")
11 > def uisetup(ui):
11 > def uisetup(ui):
12 > ui.debug(b"uisetup called [debug]\\n")
12 > ui.debug(b"uisetup called [debug]\\n")
13 > ui.write(b"uisetup called\\n")
13 > ui.write(b"uisetup called\\n")
14 > ui.status(b"uisetup called [status]\\n")
14 > ui.status(b"uisetup called [status]\\n")
15 > ui.flush()
15 > ui.flush()
16 > def reposetup(ui, repo):
16 > def reposetup(ui, repo):
17 > ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root))
17 > ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root))
18 > ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!"))
18 > ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!"))
19 > ui.flush()
19 > ui.flush()
20 > @command(b'foo', [], b'hg foo')
20 > @command(b'foo', [], b'hg foo')
21 > def foo(ui, *args, **kwargs):
21 > def foo(ui, *args, **kwargs):
22 > foo = ui.config(b'tests', b'foo')
22 > foo = ui.config(b'tests', b'foo')
23 > ui.write(foo)
23 > ui.write(foo)
24 > ui.write(b"\\n")
24 > ui.write(b"\\n")
25 > @command(b'bar', [], b'hg bar', norepo=True)
25 > @command(b'bar', [], b'hg bar', norepo=True)
26 > def bar(ui, *args, **kwargs):
26 > def bar(ui, *args, **kwargs):
27 > ui.write(b"Bar\\n")
27 > ui.write(b"Bar\\n")
28 > EOF
28 > EOF
29 $ abspath=`pwd`/foobar.py
29 $ abspath=`pwd`/foobar.py
30
30
31 $ mkdir barfoo
31 $ mkdir barfoo
32 $ cp foobar.py barfoo/__init__.py
32 $ cp foobar.py barfoo/__init__.py
33 $ barfoopath=`pwd`/barfoo
33 $ barfoopath=`pwd`/barfoo
34
34
35 $ hg init a
35 $ hg init a
36 $ cd a
36 $ cd a
37 $ echo foo > file
37 $ echo foo > file
38 $ hg add file
38 $ hg add file
39 $ hg commit -m 'add file'
39 $ hg commit -m 'add file'
40
40
41 $ echo '[extensions]' >> $HGRCPATH
41 $ echo '[extensions]' >> $HGRCPATH
42 $ echo "foobar = $abspath" >> $HGRCPATH
42 $ echo "foobar = $abspath" >> $HGRCPATH
43
43
44 $ filterlog () {
45 > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!'
46 > }
47
44 Test extension setup timings
48 Test extension setup timings
45
49
46 $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1
50 $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1 | filterlog
47 debug.extensions: loading extensions
51 YYYY/MM/DD HH:MM:SS (PID)> loading extensions
48 debug.extensions: - processing 1 entries
52 YYYY/MM/DD HH:MM:SS (PID)> - processing 1 entries
49 debug.extensions: - loading extension: foobar
53 YYYY/MM/DD HH:MM:SS (PID)> - loading extension: foobar
50 debug.extensions: > foobar extension loaded in * (glob)
54 YYYY/MM/DD HH:MM:SS (PID)> > foobar extension loaded in * (glob)
51 debug.extensions: - validating extension tables: foobar
55 YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: foobar
52 debug.extensions: - invoking registered callbacks: foobar
56 YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: foobar
53 debug.extensions: > callbacks completed in * (glob)
57 YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
54 debug.extensions: > loaded 1 extensions, total time * (glob)
58 YYYY/MM/DD HH:MM:SS (PID)> > loaded 1 extensions, total time * (glob)
55 debug.extensions: - loading configtable attributes
59 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
56 debug.extensions: - executing uisetup hooks
60 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
57 debug.extensions: - running uisetup for foobar
61 YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for foobar
58 uisetup called [debug]
62 uisetup called [debug]
59 uisetup called
63 uisetup called
60 uisetup called [status]
64 uisetup called [status]
61 debug.extensions: > uisetup for foobar took * (glob)
65 YYYY/MM/DD HH:MM:SS (PID)> > uisetup for foobar took * (glob)
62 debug.extensions: > all uisetup took * (glob)
66 YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
63 debug.extensions: - executing extsetup hooks
67 YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
64 debug.extensions: - running extsetup for foobar
68 YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for foobar
65 debug.extensions: > extsetup for foobar took * (glob)
69 YYYY/MM/DD HH:MM:SS (PID)> > extsetup for foobar took * (glob)
66 debug.extensions: > all extsetup took * (glob)
70 YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
67 debug.extensions: - executing remaining aftercallbacks
71 YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
68 debug.extensions: > remaining aftercallbacks completed in * (glob)
72 YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
69 debug.extensions: - loading extension registration objects
73 YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
70 debug.extensions: > extension registration object loading took * (glob)
74 YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
71 debug.extensions: > extension foobar take a total of * to load (glob)
75 YYYY/MM/DD HH:MM:SS (PID)> > extension foobar take a total of * to load (glob)
72 debug.extensions: extension loading complete
76 YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
73 debug.extensions: loading additional extensions
77 YYYY/MM/DD HH:MM:SS (PID)> loading additional extensions
74 debug.extensions: - processing 1 entries
78 YYYY/MM/DD HH:MM:SS (PID)> - processing 1 entries
75 debug.extensions: > loaded 0 extensions, total time * (glob)
79 YYYY/MM/DD HH:MM:SS (PID)> > loaded 0 extensions, total time * (glob)
76 debug.extensions: - loading configtable attributes
80 YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
77 debug.extensions: - executing uisetup hooks
81 YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
78 debug.extensions: > all uisetup took * (glob)
82 YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
79 debug.extensions: - executing extsetup hooks
83 YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
80 debug.extensions: > all extsetup took * (glob)
84 YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
81 debug.extensions: - executing remaining aftercallbacks
85 YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
82 debug.extensions: > remaining aftercallbacks completed in * (glob)
86 YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
83 debug.extensions: - loading extension registration objects
87 YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
84 debug.extensions: > extension registration object loading took * (glob)
88 YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
85 debug.extensions: extension loading complete
89 YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
86 debug.extensions: - executing reposetup hooks
90 YYYY/MM/DD HH:MM:SS (PID)> - executing reposetup hooks
87 debug.extensions: - running reposetup for foobar
91 YYYY/MM/DD HH:MM:SS (PID)> - running reposetup for foobar
88 reposetup called for a
92 reposetup called for a
89 ui == repo.ui
93 ui == repo.ui
90 debug.extensions: > reposetup for foobar took * (glob)
94 YYYY/MM/DD HH:MM:SS (PID)> > reposetup for foobar took * (glob)
91 debug.extensions: > all reposetup took * (glob)
95 YYYY/MM/DD HH:MM:SS (PID)> > all reposetup took * (glob)
92 Foo
96 Foo
93
97
94 $ cd ..
98 $ cd ..
95
99
96 $ echo 'foobar = !' >> $HGRCPATH
100 $ echo 'foobar = !' >> $HGRCPATH
General Comments 0
You need to be logged in to leave comments. Login now