##// END OF EJS Templates
errors: format "abort: " text in a new Abort.format() method...
Martin von Zweigbergk -
r46497:600aec73 default
parent child Browse files
Show More
@@ -1,751 +1,749 b''
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg
8 """command server extension for cHg
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'setenv' command
19 'setenv' command
20 replace os.environ completely
20 replace os.environ completely
21
21
22 'setumask' command (DEPRECATED)
22 'setumask' command (DEPRECATED)
23 'setumask2' command
23 'setumask2' command
24 set umask
24 set umask
25
25
26 'validate' command
26 'validate' command
27 reload the config and check if the server is up to date
27 reload the config and check if the server is up to date
28
28
29 Config
29 Config
30 ------
30 ------
31
31
32 ::
32 ::
33
33
34 [chgserver]
34 [chgserver]
35 # how long (in seconds) should an idle chg server exit
35 # how long (in seconds) should an idle chg server exit
36 idletimeout = 3600
36 idletimeout = 3600
37
37
38 # whether to skip config or env change checks
38 # whether to skip config or env change checks
39 skiphash = False
39 skiphash = False
40 """
40 """
41
41
42 from __future__ import absolute_import
42 from __future__ import absolute_import
43
43
44 import inspect
44 import inspect
45 import os
45 import os
46 import re
46 import re
47 import socket
47 import socket
48 import stat
48 import stat
49 import struct
49 import struct
50 import time
50 import time
51
51
52 from .i18n import _
52 from .i18n import _
53 from .pycompat import (
53 from .pycompat import (
54 getattr,
54 getattr,
55 setattr,
55 setattr,
56 )
56 )
57
57
58 from . import (
58 from . import (
59 commandserver,
59 commandserver,
60 encoding,
60 encoding,
61 error,
61 error,
62 extensions,
62 extensions,
63 node,
63 node,
64 pycompat,
64 pycompat,
65 util,
65 util,
66 )
66 )
67
67
68 from .utils import (
68 from .utils import (
69 hashutil,
69 hashutil,
70 procutil,
70 procutil,
71 stringutil,
71 stringutil,
72 )
72 )
73
73
74
74
def _hashlist(items):
    """Return the sha1 hexdigest of a pretty-printed list."""
    digest = hashutil.sha1(stringutil.pprint(items)).digest()
    return node.hex(digest)
78
78
79
79
# sensitive config sections affecting confighash
_configsections = [
    b'alias',  # affects global state commands.table
    b'diff-tools',  # affects whether gui or not in extdiff's uisetup
    b'eol',  # uses setconfig('eol', ...)
    b'extdiff',  # uisetup will register new commands
    b'extensions',
    b'fastannotate',  # affects annotate command and adds fastannonate cmd
    b'merge-tools',  # affects whether gui or not in extdiff's uisetup
    b'schemes',  # extsetup will update global hg.schemes
]

# (section, item) pairs that are sensitive individually, not as a whole section
_configsectionitems = [
    (b'commands', b'show.aliasprefix'),  # show.py reads it in extsetup
]

# sensitive environment variables affecting confighash
# (verbose regex: whitespace inside the pattern is ignored via re.X)
_envre = re.compile(
    br'''\A(?:
    CHGHG
    |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
    |HG(?:ENCODING|PLAIN).*
    |LANG(?:UAGE)?
    |LC_.*
    |LD_.*
    |PATH
    |PYTHON.*
    |TERM(?:INFO)?
    |TZ
    )\Z''',
    re.X,
)
112
112
113
113
def _confighash(ui):
    """return a quick hash for detecting config/env changes

    confighash is the hash of sensitive config items and environment variables.

    for chgserver, it is designed that once confighash changes, the server is
    not qualified to serve its client and should redirect the client to a new
    server. different from mtimehash, confighash change will not mark the
    server outdated and exit since the user can have different configs at the
    same time.
    """
    sensitive = []
    for section in _configsections:
        sensitive.append(ui.configitems(section))
    for section, name in _configsectionitems:
        sensitive.append(ui.config(section, name))
    sectionhash = _hashlist(sensitive)
    # If $CHGHG is set, the change to $HG should not trigger a new chg server
    ignored = {b'HG'} if b'CHGHG' in encoding.environ else set()
    envitems = sorted(
        (k, v)
        for k, v in pycompat.iteritems(encoding.environ)
        if _envre.match(k) and k not in ignored
    )
    envhash = _hashlist(envitems)
    return sectionhash[:6] + envhash[:6]
143
143
144
144
def _getmtimepaths(ui):
    """get a list of paths that should be checked to detect change

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - python binary
    """
    modules = [mod for _name, mod in extensions.extensions(ui)]
    try:
        from . import __version__

        modules.append(__version__)
    except ImportError:
        pass
    found = set()
    if pycompat.sysexecutable:
        found.add(pycompat.sysexecutable)
    for mod in modules:
        try:
            found.add(pycompat.fsencode(inspect.getabsfile(mod)))
        except TypeError:
            # built-in/extension modules have no usable source file
            pass
    return sorted(found)
169
169
170
170
def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash calls stat on given paths and calculate a hash based on size and
    mtime of each file. mtimehash does not read file content because reading is
    expensive. therefore it's not 100% reliable for detecting content changes.
    it's possible to return different hashes for same file contents.
    it's also possible to return a same hash for different file contents for
    some carefully crafted situation.

    for chgserver, it is designed that once mtimehash changes, the server is
    considered outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because we only know the paths of
    extensions after importing them (there is imp.find_module but that faces
    race conditions). We need to calculate confighash without importing.
    """

    def trystat(path):
        # best-effort stat: unreadable or missing files hash as None
        try:
            st = os.stat(path)
            return (st[stat.ST_MTIME], st.st_size)
        except OSError:
            # could be ENOENT, EPERM etc. not fatal in any case
            return None

    return _hashlist([trystat(p) for p in paths])[:12]
198
198
199
199
class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""

    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        """Build a hashstate snapshot for *ui*, computing both hashes.

        mtimepaths may be supplied to reuse a previously computed path list.
        """
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        state = hashstate(_confighash(ui), _mtimehash(mtimepaths), mtimepaths)
        ui.log(
            b'cmdserver',
            b'confighash = %s mtimehash = %s\n',
            state.confighash,
            state.mtimehash,
        )
        return state
221
221
222
222
def _newchgui(srcui, csystem, attachio):
    # Wrap srcui's class so ui.system()/pager requests are forwarded to the
    # chg client through csystem (the 'S' channel) whenever possible.
    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            if src:
                # inherit the channel from a copied ui when present
                self._csystem = getattr(src, '_csystem', csystem)
            else:
                self._csystem = csystem

        def _runsystem(self, cmd, environ, cwd, out):
            # fallback to the original system method if
            #  a. the output stream is not stdout (e.g. stderr, cStringIO),
            #  b. or stdout is redirected by protectfinout(),
            # because the chg client is not aware of these situations and
            # will behave differently (i.e. write to stdout).
            if (
                out is not self.fout
                or not util.safehasattr(self.fout, b'fileno')
                or self.fout.fileno() != procutil.stdout.fileno()
                or self._finoutredirected
            ):
                return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
            self.flush()
            return self._csystem(cmd, procutil.shellenviron(environ), cwd)

        def _runpager(self, cmd, env=None):
            # pager runs on the client side; it may call back 'attachio'
            self._csystem(
                cmd,
                procutil.shellenviron(env),
                type=b'pager',
                cmdtable={b'attachio': attachio},
            )
            return True

    return chgui(srcui)
258
258
259
259
def _loadnewui(srcui, args, cdebug):
    """Build a fresh (ui, lui) pair from command-line *args*.

    The new ui inherits srcui's stdio, environ and chg system channel, plus
    any config entries that did not come from the command line or environ.
    """
    from . import dispatch  # avoid cycle

    newui = srcui.__class__.load()
    for a in [b'fin', b'fout', b'ferr', b'environ']:
        setattr(newui, a, getattr(srcui, a))
    if util.safehasattr(srcui, b'_csystem'):
        newui._csystem = srcui._csystem

    # command line args
    options = dispatch._earlyparseopts(newui, args)
    dispatch._parseconfig(newui, options[b'config'])

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if b':' in source or source == b'--config' or source.startswith(b'$'):
            # path:line or command line, or environ
            continue
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwd = options[b'cwd']
    cwd = cwd and os.path.realpath(cwd) or None
    rpath = options[b'repository']
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    extensions.populateui(newui)
    commandserver.setuplogging(newui, fp=cdebug)
    if newui is not newlui:
        extensions.populateui(newlui)
        commandserver.setuplogging(newlui, fp=cdebug)

    return (newui, newlui)
294
294
295
295
class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    type, '\0',
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    if type == 'system', waits for:

    exitcode length (unsigned int),
    exitcode (int)

    if type == 'pager', repetitively waits for a command name ending with '\n'
    and executes it defined by cmdtable, or exits the loop if the command name
    is empty.
    """

    def __init__(self, in_, out, channel):
        self.in_ = in_
        self.out = out
        self.channel = channel

    def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
        # frame: channel byte + payload length, then NUL-separated fields
        fields = [type, cmd, os.path.abspath(cwd or b'.')]
        fields.extend(
            b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ)
        )
        payload = b'\0'.join(fields)
        self.out.write(struct.pack(b'>cI', self.channel, len(payload)))
        self.out.write(payload)
        self.out.flush()

        if type == b'system':
            # the client replies with a length-prefixed 4-byte exit code
            (length,) = struct.unpack(b'>I', self.in_.read(4))
            if length != 4:
                raise error.Abort(_(b'invalid response'))
            (rc,) = struct.unpack(b'>i', self.in_.read(4))
            return rc
        if type == b'pager':
            # serve client-issued commands until an empty name arrives
            while True:
                cmd = self.in_.readline()[:-1]
                if not cmd:
                    break
                if cmdtable and cmd in cmdtable:
                    cmdtable[cmd]()
                else:
                    raise error.Abort(_(b'unexpected command: %s') % cmd)
        else:
            raise error.ProgrammingError(b'invalid S channel type: %s' % type)
348
348
349
349
# stdio streams mirrored between the command server and the ui object:
# server.ch, ui.fp, mode
_iochannels = [
    (b'cin', b'fin', 'rb'),
    (b'cout', b'fout', 'wb'),
    (b'cerr', b'ferr', 'wb'),
]
356
356
357
357
358 class chgcmdserver(commandserver.server):
358 class chgcmdserver(commandserver.server):
    def __init__(
        self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
    ):
        # wrap ui so system()/pager requests go through the 'S' channel
        super(chgcmdserver, self).__init__(
            _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
            repo,
            fin,
            fout,
            prereposetups,
        )
        self.clientsock = sock
        self._ioattached = False
        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
        self.hashstate = hashstate
        self.baseaddress = baseaddress
        if hashstate is not None:
            # 'validate' is only advertised when hash-based staleness
            # checking is enabled
            self.capabilities = self.capabilities.copy()
            self.capabilities[b'validate'] = chgcmdserver.validate
377
377
    def cleanup(self):
        """Flush pending output and detach from the client's stdio."""
        super(chgcmdserver, self).cleanup()
        # dispatch._runcatch() does not flush outputs if exception is not
        # handled by dispatch._dispatch()
        self.ui.flush()
        self._restoreio()
        self._ioattached = False
385
385
    def attachio(self):
        """Attach to client's stdio passed via unix domain socket; all
        channels except cresult will no longer be used
        """
        # tell client to sendmsg() with 1-byte payload, which makes it
        # distinctive from "attachio\n" command consumed by client.read()
        self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
        clientfds = util.recvfds(self.clientsock.fileno())
        self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)

        ui = self.ui
        ui.flush()
        self._saveio()
        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
            assert fd > 0
            fp = getattr(ui, fn)
            # redirect the ui file to the client's fd, then drop the extra fd
            os.dup2(fd, fp.fileno())
            os.close(fd)
            if self._ioattached:
                continue
            # reset buffering mode when client is first attached. as we want
            # to see output immediately on pager, the mode stays unchanged
            # when client re-attached. ferr is unchanged because it should
            # be unbuffered no matter if it is a tty or not.
            if fn == b'ferr':
                newfp = fp
            elif pycompat.ispy3:
                # On Python 3, the standard library doesn't offer line-buffered
                # binary streams, so wrap/unwrap it.
                if fp.isatty():
                    newfp = procutil.make_line_buffered(fp)
                else:
                    newfp = procutil.unwrap_line_buffered(fp)
            else:
                # Python 2 uses the I/O streams provided by the C library, so
                # make it line-buffered explicitly. Otherwise the default would
                # be decided on first write(), where fout could be a pager.
                if fp.isatty():
                    bufsize = 1  # line buffered
                else:
                    bufsize = -1  # system default
                newfp = os.fdopen(fp.fileno(), mode, bufsize)
            if newfp is not fp:
                setattr(ui, fn, newfp)
            setattr(self, cn, newfp)

        self._ioattached = True
        # reply on cresult with the number of attached fds
        self.cresult.write(struct.pack(b'>i', len(clientfds)))
434
434
435 def _saveio(self):
435 def _saveio(self):
436 if self._oldios:
436 if self._oldios:
437 return
437 return
438 ui = self.ui
438 ui = self.ui
439 for cn, fn, _mode in _iochannels:
439 for cn, fn, _mode in _iochannels:
440 ch = getattr(self, cn)
440 ch = getattr(self, cn)
441 fp = getattr(ui, fn)
441 fp = getattr(ui, fn)
442 fd = os.dup(fp.fileno())
442 fd = os.dup(fp.fileno())
443 self._oldios.append((ch, fp, fd))
443 self._oldios.append((ch, fp, fd))
444
444
    def _restoreio(self):
        """Undo _saveio(): restore the original fds, ui files and channels."""
        if not self._oldios:
            return
        nullfd = os.open(os.devnull, os.O_WRONLY)
        ui = self.ui
        for (ch, fp, fd), (cn, fn, mode) in zip(self._oldios, _iochannels):
            newfp = getattr(ui, fn)
            # On Python 2, newfp and fp may be separate file objects associated
            # with the same fd, so we must close newfp while it's associated
            # with the client. Otherwise the new associated fd would be closed
            # when newfp gets deleted. On Python 3, newfp is just a wrapper
            # around fp even if newfp is not fp, so deleting newfp is safe.
            if not (pycompat.ispy3 or newfp is fp):
                newfp.close()
            # restore original fd: fp is open again
            try:
                if (pycompat.ispy3 or newfp is fp) and 'w' in mode:
                    # Discard buffered data which couldn't be flushed because
                    # of EPIPE. The data should belong to the current session
                    # and should never persist.
                    os.dup2(nullfd, fp.fileno())
                    fp.flush()
                os.dup2(fd, fp.fileno())
            except OSError as err:
                # According to issue6330, running chg on heavy loaded systems
                # can lead to EBUSY. [man dup2] indicates that, on Linux,
                # EBUSY comes from a race condition between open() and dup2().
                # However it's not clear why open() race occurred for
                # newfd=stdin/out/err.
                self.ui.log(
                    b'chgserver',
                    b'got %s while duplicating %s\n',
                    stringutil.forcebytestr(err),
                    fn,
                )
            os.close(fd)
            setattr(self, cn, ch)
            setattr(ui, fn, fp)
        os.close(nullfd)
        del self._oldios[:]
485
485
    def validate(self):
        """Reload the config and check if the server is up to date

        Read a list of '\0' separated arguments.
        Write a non-empty list of '\0' separated instruction strings or '\0'
        if the list is empty.
        An instruction string could be either:
            - "unlink $path", the client should unlink the path to stop the
              outdated server.
            - "redirect $path", the client should attempt to connect to $path
              first. If it does not work, start a new server. It implies
              "reconnect".
            - "exit $n", the client should exit directly with code n.
              This may happen if we cannot parse the config.
            - "reconnect", the client should close the connection and
              reconnect.
        If neither "reconnect" nor "redirect" is included in the instruction
        list, the client can continue with this server after completing all
        the instructions.
        """
        args = self._readlist()
        try:
            self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
        except error.ParseError as inst:
            self.ui.warn(inst.format())
            self.ui.flush()
            self.cresult.write(b'exit 255')
            return
        except error.Abort as inst:
            # Abort.format() renders the "abort: ..." line including the hint
            self.ui.error(inst.format())
            self.ui.flush()
            self.cresult.write(b'exit 255')
            return
        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
        insts = []
        if newhash.mtimehash != self.hashstate.mtimehash:
            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
            insts.append(b'unlink %s' % addr)
            # mtimehash is empty if one or more extensions fail to load.
            # to be compatible with hg, still serve the client this time.
            if self.hashstate.mtimehash:
                insts.append(b'reconnect')
        if newhash.confighash != self.hashstate.confighash:
            addr = _hashaddress(self.baseaddress, newhash.confighash)
            insts.append(b'redirect %s' % addr)
        self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
        self.cresult.write(b'\0'.join(insts) or b'\0')
535
533
536 def chdir(self):
534 def chdir(self):
537 """Change current directory
535 """Change current directory
538
536
539 Note that the behavior of --cwd option is bit different from this.
537 Note that the behavior of --cwd option is bit different from this.
540 It does not affect --config parameter.
538 It does not affect --config parameter.
541 """
539 """
542 path = self._readstr()
540 path = self._readstr()
543 if not path:
541 if not path:
544 return
542 return
545 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
543 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
546 os.chdir(path)
544 os.chdir(path)
547
545
548 def setumask(self):
546 def setumask(self):
549 """Change umask (DEPRECATED)"""
547 """Change umask (DEPRECATED)"""
550 # BUG: this does not follow the message frame structure, but kept for
548 # BUG: this does not follow the message frame structure, but kept for
551 # backward compatibility with old chg clients for some time
549 # backward compatibility with old chg clients for some time
552 self._setumask(self._read(4))
550 self._setumask(self._read(4))
553
551
554 def setumask2(self):
552 def setumask2(self):
555 """Change umask"""
553 """Change umask"""
556 data = self._readstr()
554 data = self._readstr()
557 if len(data) != 4:
555 if len(data) != 4:
558 raise ValueError(b'invalid mask length in setumask2 request')
556 raise ValueError(b'invalid mask length in setumask2 request')
559 self._setumask(data)
557 self._setumask(data)
560
558
561 def _setumask(self, data):
559 def _setumask(self, data):
562 mask = struct.unpack(b'>I', data)[0]
560 mask = struct.unpack(b'>I', data)[0]
563 self.ui.log(b'chgserver', b'setumask %r\n', mask)
561 self.ui.log(b'chgserver', b'setumask %r\n', mask)
564 util.setumask(mask)
562 util.setumask(mask)
565
563
566 def runcommand(self):
564 def runcommand(self):
567 # pager may be attached within the runcommand session, which should
565 # pager may be attached within the runcommand session, which should
568 # be detached at the end of the session. otherwise the pager wouldn't
566 # be detached at the end of the session. otherwise the pager wouldn't
569 # receive EOF.
567 # receive EOF.
570 globaloldios = self._oldios
568 globaloldios = self._oldios
571 self._oldios = []
569 self._oldios = []
572 try:
570 try:
573 return super(chgcmdserver, self).runcommand()
571 return super(chgcmdserver, self).runcommand()
574 finally:
572 finally:
575 self._restoreio()
573 self._restoreio()
576 self._oldios = globaloldios
574 self._oldios = globaloldios
577
575
578 def setenv(self):
576 def setenv(self):
579 """Clear and update os.environ
577 """Clear and update os.environ
580
578
581 Note that not all variables can make an effect on the running process.
579 Note that not all variables can make an effect on the running process.
582 """
580 """
583 l = self._readlist()
581 l = self._readlist()
584 try:
582 try:
585 newenv = dict(s.split(b'=', 1) for s in l)
583 newenv = dict(s.split(b'=', 1) for s in l)
586 except ValueError:
584 except ValueError:
587 raise ValueError(b'unexpected value in setenv request')
585 raise ValueError(b'unexpected value in setenv request')
588 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
586 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
589
587
590 encoding.environ.clear()
588 encoding.environ.clear()
591 encoding.environ.update(newenv)
589 encoding.environ.update(newenv)
592
590
593 capabilities = commandserver.server.capabilities.copy()
591 capabilities = commandserver.server.capabilities.copy()
594 capabilities.update(
592 capabilities.update(
595 {
593 {
596 b'attachio': attachio,
594 b'attachio': attachio,
597 b'chdir': chdir,
595 b'chdir': chdir,
598 b'runcommand': runcommand,
596 b'runcommand': runcommand,
599 b'setenv': setenv,
597 b'setenv': setenv,
600 b'setumask': setumask,
598 b'setumask': setumask,
601 b'setumask2': setumask2,
599 b'setumask2': setumask2,
602 }
600 }
603 )
601 )
604
602
605 if util.safehasattr(procutil, b'setprocname'):
603 if util.safehasattr(procutil, b'setprocname'):
606
604
607 def setprocname(self):
605 def setprocname(self):
608 """Change process title"""
606 """Change process title"""
609 name = self._readstr()
607 name = self._readstr()
610 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
608 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
611 procutil.setprocname(name)
609 procutil.setprocname(name)
612
610
613 capabilities[b'setprocname'] = setprocname
611 capabilities[b'setprocname'] = setprocname
614
612
615
613
616 def _tempaddress(address):
614 def _tempaddress(address):
617 return b'%s.%d.tmp' % (address, os.getpid())
615 return b'%s.%d.tmp' % (address, os.getpid())
618
616
619
617
620 def _hashaddress(address, hashstr):
618 def _hashaddress(address, hashstr):
621 # if the basename of address contains '.', use only the left part. this
619 # if the basename of address contains '.', use only the left part. this
622 # makes it possible for the client to pass 'server.tmp$PID' and follow by
620 # makes it possible for the client to pass 'server.tmp$PID' and follow by
623 # an atomic rename to avoid locking when spawning new servers.
621 # an atomic rename to avoid locking when spawning new servers.
624 dirname, basename = os.path.split(address)
622 dirname, basename = os.path.split(address)
625 basename = basename.split(b'.', 1)[0]
623 basename = basename.split(b'.', 1)[0]
626 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
624 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
627
625
628
626
629 class chgunixservicehandler(object):
627 class chgunixservicehandler(object):
630 """Set of operations for chg services"""
628 """Set of operations for chg services"""
631
629
632 pollinterval = 1 # [sec]
630 pollinterval = 1 # [sec]
633
631
634 def __init__(self, ui):
632 def __init__(self, ui):
635 self.ui = ui
633 self.ui = ui
636 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
634 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
637 self._lastactive = time.time()
635 self._lastactive = time.time()
638
636
639 def bindsocket(self, sock, address):
637 def bindsocket(self, sock, address):
640 self._inithashstate(address)
638 self._inithashstate(address)
641 self._checkextensions()
639 self._checkextensions()
642 self._bind(sock)
640 self._bind(sock)
643 self._createsymlink()
641 self._createsymlink()
644 # no "listening at" message should be printed to simulate hg behavior
642 # no "listening at" message should be printed to simulate hg behavior
645
643
646 def _inithashstate(self, address):
644 def _inithashstate(self, address):
647 self._baseaddress = address
645 self._baseaddress = address
648 if self.ui.configbool(b'chgserver', b'skiphash'):
646 if self.ui.configbool(b'chgserver', b'skiphash'):
649 self._hashstate = None
647 self._hashstate = None
650 self._realaddress = address
648 self._realaddress = address
651 return
649 return
652 self._hashstate = hashstate.fromui(self.ui)
650 self._hashstate = hashstate.fromui(self.ui)
653 self._realaddress = _hashaddress(address, self._hashstate.confighash)
651 self._realaddress = _hashaddress(address, self._hashstate.confighash)
654
652
655 def _checkextensions(self):
653 def _checkextensions(self):
656 if not self._hashstate:
654 if not self._hashstate:
657 return
655 return
658 if extensions.notloaded():
656 if extensions.notloaded():
659 # one or more extensions failed to load. mtimehash becomes
657 # one or more extensions failed to load. mtimehash becomes
660 # meaningless because we do not know the paths of those extensions.
658 # meaningless because we do not know the paths of those extensions.
661 # set mtimehash to an illegal hash value to invalidate the server.
659 # set mtimehash to an illegal hash value to invalidate the server.
662 self._hashstate.mtimehash = b''
660 self._hashstate.mtimehash = b''
663
661
664 def _bind(self, sock):
662 def _bind(self, sock):
665 # use a unique temp address so we can stat the file and do ownership
663 # use a unique temp address so we can stat the file and do ownership
666 # check later
664 # check later
667 tempaddress = _tempaddress(self._realaddress)
665 tempaddress = _tempaddress(self._realaddress)
668 util.bindunixsocket(sock, tempaddress)
666 util.bindunixsocket(sock, tempaddress)
669 self._socketstat = os.stat(tempaddress)
667 self._socketstat = os.stat(tempaddress)
670 sock.listen(socket.SOMAXCONN)
668 sock.listen(socket.SOMAXCONN)
671 # rename will replace the old socket file if exists atomically. the
669 # rename will replace the old socket file if exists atomically. the
672 # old server will detect ownership change and exit.
670 # old server will detect ownership change and exit.
673 util.rename(tempaddress, self._realaddress)
671 util.rename(tempaddress, self._realaddress)
674
672
675 def _createsymlink(self):
673 def _createsymlink(self):
676 if self._baseaddress == self._realaddress:
674 if self._baseaddress == self._realaddress:
677 return
675 return
678 tempaddress = _tempaddress(self._baseaddress)
676 tempaddress = _tempaddress(self._baseaddress)
679 os.symlink(os.path.basename(self._realaddress), tempaddress)
677 os.symlink(os.path.basename(self._realaddress), tempaddress)
680 util.rename(tempaddress, self._baseaddress)
678 util.rename(tempaddress, self._baseaddress)
681
679
682 def _issocketowner(self):
680 def _issocketowner(self):
683 try:
681 try:
684 st = os.stat(self._realaddress)
682 st = os.stat(self._realaddress)
685 return (
683 return (
686 st.st_ino == self._socketstat.st_ino
684 st.st_ino == self._socketstat.st_ino
687 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
685 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
688 )
686 )
689 except OSError:
687 except OSError:
690 return False
688 return False
691
689
692 def unlinksocket(self, address):
690 def unlinksocket(self, address):
693 if not self._issocketowner():
691 if not self._issocketowner():
694 return
692 return
695 # it is possible to have a race condition here that we may
693 # it is possible to have a race condition here that we may
696 # remove another server's socket file. but that's okay
694 # remove another server's socket file. but that's okay
697 # since that server will detect and exit automatically and
695 # since that server will detect and exit automatically and
698 # the client will start a new server on demand.
696 # the client will start a new server on demand.
699 util.tryunlink(self._realaddress)
697 util.tryunlink(self._realaddress)
700
698
701 def shouldexit(self):
699 def shouldexit(self):
702 if not self._issocketowner():
700 if not self._issocketowner():
703 self.ui.log(
701 self.ui.log(
704 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
702 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
705 )
703 )
706 return True
704 return True
707 if time.time() - self._lastactive > self._idletimeout:
705 if time.time() - self._lastactive > self._idletimeout:
708 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
706 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
709 return True
707 return True
710 return False
708 return False
711
709
712 def newconnection(self):
710 def newconnection(self):
713 self._lastactive = time.time()
711 self._lastactive = time.time()
714
712
715 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
713 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
716 return chgcmdserver(
714 return chgcmdserver(
717 self.ui,
715 self.ui,
718 repo,
716 repo,
719 fin,
717 fin,
720 fout,
718 fout,
721 conn,
719 conn,
722 prereposetups,
720 prereposetups,
723 self._hashstate,
721 self._hashstate,
724 self._baseaddress,
722 self._baseaddress,
725 )
723 )
726
724
727
725
728 def chgunixservice(ui, repo, opts):
726 def chgunixservice(ui, repo, opts):
729 # CHGINTERNALMARK is set by chg client. It is an indication of things are
727 # CHGINTERNALMARK is set by chg client. It is an indication of things are
730 # started by chg so other code can do things accordingly, like disabling
728 # started by chg so other code can do things accordingly, like disabling
731 # demandimport or detecting chg client started by chg client. When executed
729 # demandimport or detecting chg client started by chg client. When executed
732 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
730 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
733 # environ cleaner.
731 # environ cleaner.
734 if b'CHGINTERNALMARK' in encoding.environ:
732 if b'CHGINTERNALMARK' in encoding.environ:
735 del encoding.environ[b'CHGINTERNALMARK']
733 del encoding.environ[b'CHGINTERNALMARK']
736 # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
734 # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
737 # it thinks the current value is "C". This breaks the hash computation and
735 # it thinks the current value is "C". This breaks the hash computation and
738 # causes chg to restart loop.
736 # causes chg to restart loop.
739 if b'CHGORIG_LC_CTYPE' in encoding.environ:
737 if b'CHGORIG_LC_CTYPE' in encoding.environ:
740 encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
738 encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
741 del encoding.environ[b'CHGORIG_LC_CTYPE']
739 del encoding.environ[b'CHGORIG_LC_CTYPE']
742 elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
740 elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
743 if b'LC_CTYPE' in encoding.environ:
741 if b'LC_CTYPE' in encoding.environ:
744 del encoding.environ[b'LC_CTYPE']
742 del encoding.environ[b'LC_CTYPE']
745 del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
743 del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
746
744
747 if repo:
745 if repo:
748 # one chgserver can serve multiple repos. drop repo information
746 # one chgserver can serve multiple repos. drop repo information
749 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
747 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
750 h = chgunixservicehandler(ui)
748 h = chgunixservicehandler(ui)
751 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
749 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
@@ -1,1332 +1,1330 b''
1 # dispatch.py - command dispatching for mercurial
1 # dispatch.py - command dispatching for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import errno
10 import errno
11 import getopt
11 import getopt
12 import io
12 import io
13 import os
13 import os
14 import pdb
14 import pdb
15 import re
15 import re
16 import signal
16 import signal
17 import sys
17 import sys
18 import traceback
18 import traceback
19
19
20
20
21 from .i18n import _
21 from .i18n import _
22 from .pycompat import getattr
22 from .pycompat import getattr
23
23
24 from hgdemandimport import tracing
24 from hgdemandimport import tracing
25
25
26 from . import (
26 from . import (
27 cmdutil,
27 cmdutil,
28 color,
28 color,
29 commands,
29 commands,
30 demandimport,
30 demandimport,
31 encoding,
31 encoding,
32 error,
32 error,
33 extensions,
33 extensions,
34 fancyopts,
34 fancyopts,
35 help,
35 help,
36 hg,
36 hg,
37 hook,
37 hook,
38 localrepo,
38 localrepo,
39 profiling,
39 profiling,
40 pycompat,
40 pycompat,
41 rcutil,
41 rcutil,
42 registrar,
42 registrar,
43 requirements as requirementsmod,
43 requirements as requirementsmod,
44 scmutil,
44 scmutil,
45 ui as uimod,
45 ui as uimod,
46 util,
46 util,
47 vfs,
47 vfs,
48 )
48 )
49
49
50 from .utils import (
50 from .utils import (
51 procutil,
51 procutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55
55
56 class request(object):
56 class request(object):
57 def __init__(
57 def __init__(
58 self,
58 self,
59 args,
59 args,
60 ui=None,
60 ui=None,
61 repo=None,
61 repo=None,
62 fin=None,
62 fin=None,
63 fout=None,
63 fout=None,
64 ferr=None,
64 ferr=None,
65 fmsg=None,
65 fmsg=None,
66 prereposetups=None,
66 prereposetups=None,
67 ):
67 ):
68 self.args = args
68 self.args = args
69 self.ui = ui
69 self.ui = ui
70 self.repo = repo
70 self.repo = repo
71
71
72 # input/output/error streams
72 # input/output/error streams
73 self.fin = fin
73 self.fin = fin
74 self.fout = fout
74 self.fout = fout
75 self.ferr = ferr
75 self.ferr = ferr
76 # separate stream for status/error messages
76 # separate stream for status/error messages
77 self.fmsg = fmsg
77 self.fmsg = fmsg
78
78
79 # remember options pre-parsed by _earlyparseopts()
79 # remember options pre-parsed by _earlyparseopts()
80 self.earlyoptions = {}
80 self.earlyoptions = {}
81
81
82 # reposetups which run before extensions, useful for chg to pre-fill
82 # reposetups which run before extensions, useful for chg to pre-fill
83 # low-level repo state (for example, changelog) before extensions.
83 # low-level repo state (for example, changelog) before extensions.
84 self.prereposetups = prereposetups or []
84 self.prereposetups = prereposetups or []
85
85
86 # store the parsed and canonical command
86 # store the parsed and canonical command
87 self.canonical_command = None
87 self.canonical_command = None
88
88
89 def _runexithandlers(self):
89 def _runexithandlers(self):
90 exc = None
90 exc = None
91 handlers = self.ui._exithandlers
91 handlers = self.ui._exithandlers
92 try:
92 try:
93 while handlers:
93 while handlers:
94 func, args, kwargs = handlers.pop()
94 func, args, kwargs = handlers.pop()
95 try:
95 try:
96 func(*args, **kwargs)
96 func(*args, **kwargs)
97 except: # re-raises below
97 except: # re-raises below
98 if exc is None:
98 if exc is None:
99 exc = sys.exc_info()[1]
99 exc = sys.exc_info()[1]
100 self.ui.warnnoi18n(b'error in exit handlers:\n')
100 self.ui.warnnoi18n(b'error in exit handlers:\n')
101 self.ui.traceback(force=True)
101 self.ui.traceback(force=True)
102 finally:
102 finally:
103 if exc is not None:
103 if exc is not None:
104 raise exc
104 raise exc
105
105
106
106
107 def run():
107 def run():
108 """run the command in sys.argv"""
108 """run the command in sys.argv"""
109 try:
109 try:
110 initstdio()
110 initstdio()
111 with tracing.log('parse args into request'):
111 with tracing.log('parse args into request'):
112 req = request(pycompat.sysargv[1:])
112 req = request(pycompat.sysargv[1:])
113 err = None
113 err = None
114 try:
114 try:
115 status = dispatch(req)
115 status = dispatch(req)
116 except error.StdioError as e:
116 except error.StdioError as e:
117 err = e
117 err = e
118 status = -1
118 status = -1
119
119
120 # In all cases we try to flush stdio streams.
120 # In all cases we try to flush stdio streams.
121 if util.safehasattr(req.ui, b'fout'):
121 if util.safehasattr(req.ui, b'fout'):
122 assert req.ui is not None # help pytype
122 assert req.ui is not None # help pytype
123 assert req.ui.fout is not None # help pytype
123 assert req.ui.fout is not None # help pytype
124 try:
124 try:
125 req.ui.fout.flush()
125 req.ui.fout.flush()
126 except IOError as e:
126 except IOError as e:
127 err = e
127 err = e
128 status = -1
128 status = -1
129
129
130 if util.safehasattr(req.ui, b'ferr'):
130 if util.safehasattr(req.ui, b'ferr'):
131 assert req.ui is not None # help pytype
131 assert req.ui is not None # help pytype
132 assert req.ui.ferr is not None # help pytype
132 assert req.ui.ferr is not None # help pytype
133 try:
133 try:
134 if err is not None and err.errno != errno.EPIPE:
134 if err is not None and err.errno != errno.EPIPE:
135 req.ui.ferr.write(
135 req.ui.ferr.write(
136 b'abort: %s\n' % encoding.strtolocal(err.strerror)
136 b'abort: %s\n' % encoding.strtolocal(err.strerror)
137 )
137 )
138 req.ui.ferr.flush()
138 req.ui.ferr.flush()
139 # There's not much we can do about an I/O error here. So (possibly)
139 # There's not much we can do about an I/O error here. So (possibly)
140 # change the status code and move on.
140 # change the status code and move on.
141 except IOError:
141 except IOError:
142 status = -1
142 status = -1
143
143
144 _silencestdio()
144 _silencestdio()
145 except KeyboardInterrupt:
145 except KeyboardInterrupt:
146 # Catch early/late KeyboardInterrupt as last ditch. Here nothing will
146 # Catch early/late KeyboardInterrupt as last ditch. Here nothing will
147 # be printed to console to avoid another IOError/KeyboardInterrupt.
147 # be printed to console to avoid another IOError/KeyboardInterrupt.
148 status = -1
148 status = -1
149 sys.exit(status & 255)
149 sys.exit(status & 255)
150
150
151
151
152 if pycompat.ispy3:
152 if pycompat.ispy3:
153
153
154 def initstdio():
154 def initstdio():
155 # stdio streams on Python 3 are io.TextIOWrapper instances proxying another
155 # stdio streams on Python 3 are io.TextIOWrapper instances proxying another
156 # buffer. These streams will normalize \n to \r\n by default. Mercurial's
156 # buffer. These streams will normalize \n to \r\n by default. Mercurial's
157 # preferred mechanism for writing output (ui.write()) uses io.BufferedWriter
157 # preferred mechanism for writing output (ui.write()) uses io.BufferedWriter
158 # instances, which write to the underlying stdio file descriptor in binary
158 # instances, which write to the underlying stdio file descriptor in binary
159 # mode. ui.write() uses \n for line endings and no line ending normalization
159 # mode. ui.write() uses \n for line endings and no line ending normalization
160 # is attempted through this interface. This "just works," even if the system
160 # is attempted through this interface. This "just works," even if the system
161 # preferred line ending is not \n.
161 # preferred line ending is not \n.
162 #
162 #
163 # But some parts of Mercurial (e.g. hooks) can still send data to sys.stdout
163 # But some parts of Mercurial (e.g. hooks) can still send data to sys.stdout
164 # and sys.stderr. They will inherit the line ending normalization settings,
164 # and sys.stderr. They will inherit the line ending normalization settings,
165 # potentially causing e.g. \r\n to be emitted. Since emitting \n should
165 # potentially causing e.g. \r\n to be emitted. Since emitting \n should
166 # "just work," here we change the sys.* streams to disable line ending
166 # "just work," here we change the sys.* streams to disable line ending
167 # normalization, ensuring compatibility with our ui type.
167 # normalization, ensuring compatibility with our ui type.
168
168
169 # write_through is new in Python 3.7.
169 # write_through is new in Python 3.7.
170 kwargs = {
170 kwargs = {
171 "newline": "\n",
171 "newline": "\n",
172 "line_buffering": sys.stdout.line_buffering,
172 "line_buffering": sys.stdout.line_buffering,
173 }
173 }
174 if util.safehasattr(sys.stdout, "write_through"):
174 if util.safehasattr(sys.stdout, "write_through"):
175 kwargs["write_through"] = sys.stdout.write_through
175 kwargs["write_through"] = sys.stdout.write_through
176 sys.stdout = io.TextIOWrapper(
176 sys.stdout = io.TextIOWrapper(
177 sys.stdout.buffer, sys.stdout.encoding, sys.stdout.errors, **kwargs
177 sys.stdout.buffer, sys.stdout.encoding, sys.stdout.errors, **kwargs
178 )
178 )
179
179
180 kwargs = {
180 kwargs = {
181 "newline": "\n",
181 "newline": "\n",
182 "line_buffering": sys.stderr.line_buffering,
182 "line_buffering": sys.stderr.line_buffering,
183 }
183 }
184 if util.safehasattr(sys.stderr, "write_through"):
184 if util.safehasattr(sys.stderr, "write_through"):
185 kwargs["write_through"] = sys.stderr.write_through
185 kwargs["write_through"] = sys.stderr.write_through
186 sys.stderr = io.TextIOWrapper(
186 sys.stderr = io.TextIOWrapper(
187 sys.stderr.buffer, sys.stderr.encoding, sys.stderr.errors, **kwargs
187 sys.stderr.buffer, sys.stderr.encoding, sys.stderr.errors, **kwargs
188 )
188 )
189
189
190 # No write_through on read-only stream.
190 # No write_through on read-only stream.
191 sys.stdin = io.TextIOWrapper(
191 sys.stdin = io.TextIOWrapper(
192 sys.stdin.buffer,
192 sys.stdin.buffer,
193 sys.stdin.encoding,
193 sys.stdin.encoding,
194 sys.stdin.errors,
194 sys.stdin.errors,
195 # None is universal newlines mode.
195 # None is universal newlines mode.
196 newline=None,
196 newline=None,
197 line_buffering=sys.stdin.line_buffering,
197 line_buffering=sys.stdin.line_buffering,
198 )
198 )
199
199
200 def _silencestdio():
200 def _silencestdio():
201 for fp in (sys.stdout, sys.stderr):
201 for fp in (sys.stdout, sys.stderr):
202 # Check if the file is okay
202 # Check if the file is okay
203 try:
203 try:
204 fp.flush()
204 fp.flush()
205 continue
205 continue
206 except IOError:
206 except IOError:
207 pass
207 pass
208 # Otherwise mark it as closed to silence "Exception ignored in"
208 # Otherwise mark it as closed to silence "Exception ignored in"
209 # message emitted by the interpreter finalizer. Be careful to
209 # message emitted by the interpreter finalizer. Be careful to
210 # not close procutil.stdout, which may be a fdopen-ed file object
210 # not close procutil.stdout, which may be a fdopen-ed file object
211 # and its close() actually closes the underlying file descriptor.
211 # and its close() actually closes the underlying file descriptor.
212 try:
212 try:
213 fp.close()
213 fp.close()
214 except IOError:
214 except IOError:
215 pass
215 pass
216
216
217
217
218 else:
218 else:
219
219
220 def initstdio():
220 def initstdio():
221 for fp in (sys.stdin, sys.stdout, sys.stderr):
221 for fp in (sys.stdin, sys.stdout, sys.stderr):
222 procutil.setbinary(fp)
222 procutil.setbinary(fp)
223
223
224 def _silencestdio():
224 def _silencestdio():
225 pass
225 pass
226
226
227
227
228 def _formatargs(args):
228 def _formatargs(args):
229 return b' '.join(procutil.shellquote(a) for a in args)
229 return b' '.join(procutil.shellquote(a) for a in args)
230
230
231
231
232 def dispatch(req):
232 def dispatch(req):
233 """run the command specified in req.args; returns an integer status code"""
233 """run the command specified in req.args; returns an integer status code"""
234 with tracing.log('dispatch.dispatch'):
234 with tracing.log('dispatch.dispatch'):
235 if req.ferr:
235 if req.ferr:
236 ferr = req.ferr
236 ferr = req.ferr
237 elif req.ui:
237 elif req.ui:
238 ferr = req.ui.ferr
238 ferr = req.ui.ferr
239 else:
239 else:
240 ferr = procutil.stderr
240 ferr = procutil.stderr
241
241
242 try:
242 try:
243 if not req.ui:
243 if not req.ui:
244 req.ui = uimod.ui.load()
244 req.ui = uimod.ui.load()
245 req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
245 req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
246 if req.earlyoptions[b'traceback']:
246 if req.earlyoptions[b'traceback']:
247 req.ui.setconfig(b'ui', b'traceback', b'on', b'--traceback')
247 req.ui.setconfig(b'ui', b'traceback', b'on', b'--traceback')
248
248
249 # set ui streams from the request
249 # set ui streams from the request
250 if req.fin:
250 if req.fin:
251 req.ui.fin = req.fin
251 req.ui.fin = req.fin
252 if req.fout:
252 if req.fout:
253 req.ui.fout = req.fout
253 req.ui.fout = req.fout
254 if req.ferr:
254 if req.ferr:
255 req.ui.ferr = req.ferr
255 req.ui.ferr = req.ferr
256 if req.fmsg:
256 if req.fmsg:
257 req.ui.fmsg = req.fmsg
257 req.ui.fmsg = req.fmsg
258 except error.Abort as inst:
258 except error.Abort as inst:
259 ferr.write(_(b"abort: %s\n") % inst.message)
259 ferr.write(inst.format())
260 if inst.hint:
261 ferr.write(_(b"(%s)\n") % inst.hint)
262 return -1
260 return -1
263 except error.ParseError as inst:
261 except error.ParseError as inst:
264 ferr.write(inst.format())
262 ferr.write(inst.format())
265 return -1
263 return -1
266
264
267 msg = _formatargs(req.args)
265 msg = _formatargs(req.args)
268 starttime = util.timer()
266 starttime = util.timer()
269 ret = 1 # default of Python exit code on unhandled exception
267 ret = 1 # default of Python exit code on unhandled exception
270 try:
268 try:
271 ret = _runcatch(req) or 0
269 ret = _runcatch(req) or 0
272 except error.ProgrammingError as inst:
270 except error.ProgrammingError as inst:
273 req.ui.error(_(b'** ProgrammingError: %s\n') % inst)
271 req.ui.error(_(b'** ProgrammingError: %s\n') % inst)
274 if inst.hint:
272 if inst.hint:
275 req.ui.error(_(b'** (%s)\n') % inst.hint)
273 req.ui.error(_(b'** (%s)\n') % inst.hint)
276 raise
274 raise
277 except KeyboardInterrupt as inst:
275 except KeyboardInterrupt as inst:
278 try:
276 try:
279 if isinstance(inst, error.SignalInterrupt):
277 if isinstance(inst, error.SignalInterrupt):
280 msg = _(b"killed!\n")
278 msg = _(b"killed!\n")
281 else:
279 else:
282 msg = _(b"interrupted!\n")
280 msg = _(b"interrupted!\n")
283 req.ui.error(msg)
281 req.ui.error(msg)
284 except error.SignalInterrupt:
282 except error.SignalInterrupt:
285 # maybe pager would quit without consuming all the output, and
283 # maybe pager would quit without consuming all the output, and
286 # SIGPIPE was raised. we cannot print anything in this case.
284 # SIGPIPE was raised. we cannot print anything in this case.
287 pass
285 pass
288 except IOError as inst:
286 except IOError as inst:
289 if inst.errno != errno.EPIPE:
287 if inst.errno != errno.EPIPE:
290 raise
288 raise
291 ret = -1
289 ret = -1
292 finally:
290 finally:
293 duration = util.timer() - starttime
291 duration = util.timer() - starttime
294 req.ui.flush() # record blocked times
292 req.ui.flush() # record blocked times
295 if req.ui.logblockedtimes:
293 if req.ui.logblockedtimes:
296 req.ui._blockedtimes[b'command_duration'] = duration * 1000
294 req.ui._blockedtimes[b'command_duration'] = duration * 1000
297 req.ui.log(
295 req.ui.log(
298 b'uiblocked',
296 b'uiblocked',
299 b'ui blocked ms\n',
297 b'ui blocked ms\n',
300 **pycompat.strkwargs(req.ui._blockedtimes)
298 **pycompat.strkwargs(req.ui._blockedtimes)
301 )
299 )
302 return_code = ret & 255
300 return_code = ret & 255
303 req.ui.log(
301 req.ui.log(
304 b"commandfinish",
302 b"commandfinish",
305 b"%s exited %d after %0.2f seconds\n",
303 b"%s exited %d after %0.2f seconds\n",
306 msg,
304 msg,
307 return_code,
305 return_code,
308 duration,
306 duration,
309 return_code=return_code,
307 return_code=return_code,
310 duration=duration,
308 duration=duration,
311 canonical_command=req.canonical_command,
309 canonical_command=req.canonical_command,
312 )
310 )
313 try:
311 try:
314 req._runexithandlers()
312 req._runexithandlers()
315 except: # exiting, so no re-raises
313 except: # exiting, so no re-raises
316 ret = ret or -1
314 ret = ret or -1
317 # do flush again since ui.log() and exit handlers may write to ui
315 # do flush again since ui.log() and exit handlers may write to ui
318 req.ui.flush()
316 req.ui.flush()
319 return ret
317 return ret
320
318
321
319
322 def _runcatch(req):
320 def _runcatch(req):
323 with tracing.log('dispatch._runcatch'):
321 with tracing.log('dispatch._runcatch'):
324
322
325 def catchterm(*args):
323 def catchterm(*args):
326 raise error.SignalInterrupt
324 raise error.SignalInterrupt
327
325
328 ui = req.ui
326 ui = req.ui
329 try:
327 try:
330 for name in b'SIGBREAK', b'SIGHUP', b'SIGTERM':
328 for name in b'SIGBREAK', b'SIGHUP', b'SIGTERM':
331 num = getattr(signal, name, None)
329 num = getattr(signal, name, None)
332 if num:
330 if num:
333 signal.signal(num, catchterm)
331 signal.signal(num, catchterm)
334 except ValueError:
332 except ValueError:
335 pass # happens if called in a thread
333 pass # happens if called in a thread
336
334
337 def _runcatchfunc():
335 def _runcatchfunc():
338 realcmd = None
336 realcmd = None
339 try:
337 try:
340 cmdargs = fancyopts.fancyopts(
338 cmdargs = fancyopts.fancyopts(
341 req.args[:], commands.globalopts, {}
339 req.args[:], commands.globalopts, {}
342 )
340 )
343 cmd = cmdargs[0]
341 cmd = cmdargs[0]
344 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
342 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
345 realcmd = aliases[0]
343 realcmd = aliases[0]
346 except (
344 except (
347 error.UnknownCommand,
345 error.UnknownCommand,
348 error.AmbiguousCommand,
346 error.AmbiguousCommand,
349 IndexError,
347 IndexError,
350 getopt.GetoptError,
348 getopt.GetoptError,
351 ):
349 ):
352 # Don't handle this here. We know the command is
350 # Don't handle this here. We know the command is
353 # invalid, but all we're worried about for now is that
351 # invalid, but all we're worried about for now is that
354 # it's not a command that server operators expect to
352 # it's not a command that server operators expect to
355 # be safe to offer to users in a sandbox.
353 # be safe to offer to users in a sandbox.
356 pass
354 pass
357 if realcmd == b'serve' and b'--stdio' in cmdargs:
355 if realcmd == b'serve' and b'--stdio' in cmdargs:
358 # We want to constrain 'hg serve --stdio' instances pretty
356 # We want to constrain 'hg serve --stdio' instances pretty
359 # closely, as many shared-ssh access tools want to grant
357 # closely, as many shared-ssh access tools want to grant
360 # access to run *only* 'hg -R $repo serve --stdio'. We
358 # access to run *only* 'hg -R $repo serve --stdio'. We
361 # restrict to exactly that set of arguments, and prohibit
359 # restrict to exactly that set of arguments, and prohibit
362 # any repo name that starts with '--' to prevent
360 # any repo name that starts with '--' to prevent
363 # shenanigans wherein a user does something like pass
361 # shenanigans wherein a user does something like pass
364 # --debugger or --config=ui.debugger=1 as a repo
362 # --debugger or --config=ui.debugger=1 as a repo
365 # name. This used to actually run the debugger.
363 # name. This used to actually run the debugger.
366 if (
364 if (
367 len(req.args) != 4
365 len(req.args) != 4
368 or req.args[0] != b'-R'
366 or req.args[0] != b'-R'
369 or req.args[1].startswith(b'--')
367 or req.args[1].startswith(b'--')
370 or req.args[2] != b'serve'
368 or req.args[2] != b'serve'
371 or req.args[3] != b'--stdio'
369 or req.args[3] != b'--stdio'
372 ):
370 ):
373 raise error.Abort(
371 raise error.Abort(
374 _(b'potentially unsafe serve --stdio invocation: %s')
372 _(b'potentially unsafe serve --stdio invocation: %s')
375 % (stringutil.pprint(req.args),)
373 % (stringutil.pprint(req.args),)
376 )
374 )
377
375
378 try:
376 try:
379 debugger = b'pdb'
377 debugger = b'pdb'
380 debugtrace = {b'pdb': pdb.set_trace}
378 debugtrace = {b'pdb': pdb.set_trace}
381 debugmortem = {b'pdb': pdb.post_mortem}
379 debugmortem = {b'pdb': pdb.post_mortem}
382
380
383 # read --config before doing anything else
381 # read --config before doing anything else
384 # (e.g. to change trust settings for reading .hg/hgrc)
382 # (e.g. to change trust settings for reading .hg/hgrc)
385 cfgs = _parseconfig(req.ui, req.earlyoptions[b'config'])
383 cfgs = _parseconfig(req.ui, req.earlyoptions[b'config'])
386
384
387 if req.repo:
385 if req.repo:
388 # copy configs that were passed on the cmdline (--config) to
386 # copy configs that were passed on the cmdline (--config) to
389 # the repo ui
387 # the repo ui
390 for sec, name, val in cfgs:
388 for sec, name, val in cfgs:
391 req.repo.ui.setconfig(
389 req.repo.ui.setconfig(
392 sec, name, val, source=b'--config'
390 sec, name, val, source=b'--config'
393 )
391 )
394
392
395 # developer config: ui.debugger
393 # developer config: ui.debugger
396 debugger = ui.config(b"ui", b"debugger")
394 debugger = ui.config(b"ui", b"debugger")
397 debugmod = pdb
395 debugmod = pdb
398 if not debugger or ui.plain():
396 if not debugger or ui.plain():
399 # if we are in HGPLAIN mode, then disable custom debugging
397 # if we are in HGPLAIN mode, then disable custom debugging
400 debugger = b'pdb'
398 debugger = b'pdb'
401 elif req.earlyoptions[b'debugger']:
399 elif req.earlyoptions[b'debugger']:
402 # This import can be slow for fancy debuggers, so only
400 # This import can be slow for fancy debuggers, so only
403 # do it when absolutely necessary, i.e. when actual
401 # do it when absolutely necessary, i.e. when actual
404 # debugging has been requested
402 # debugging has been requested
405 with demandimport.deactivated():
403 with demandimport.deactivated():
406 try:
404 try:
407 debugmod = __import__(debugger)
405 debugmod = __import__(debugger)
408 except ImportError:
406 except ImportError:
409 pass # Leave debugmod = pdb
407 pass # Leave debugmod = pdb
410
408
411 debugtrace[debugger] = debugmod.set_trace
409 debugtrace[debugger] = debugmod.set_trace
412 debugmortem[debugger] = debugmod.post_mortem
410 debugmortem[debugger] = debugmod.post_mortem
413
411
414 # enter the debugger before command execution
412 # enter the debugger before command execution
415 if req.earlyoptions[b'debugger']:
413 if req.earlyoptions[b'debugger']:
416 ui.warn(
414 ui.warn(
417 _(
415 _(
418 b"entering debugger - "
416 b"entering debugger - "
419 b"type c to continue starting hg or h for help\n"
417 b"type c to continue starting hg or h for help\n"
420 )
418 )
421 )
419 )
422
420
423 if (
421 if (
424 debugger != b'pdb'
422 debugger != b'pdb'
425 and debugtrace[debugger] == debugtrace[b'pdb']
423 and debugtrace[debugger] == debugtrace[b'pdb']
426 ):
424 ):
427 ui.warn(
425 ui.warn(
428 _(
426 _(
429 b"%s debugger specified "
427 b"%s debugger specified "
430 b"but its module was not found\n"
428 b"but its module was not found\n"
431 )
429 )
432 % debugger
430 % debugger
433 )
431 )
434 with demandimport.deactivated():
432 with demandimport.deactivated():
435 debugtrace[debugger]()
433 debugtrace[debugger]()
436 try:
434 try:
437 return _dispatch(req)
435 return _dispatch(req)
438 finally:
436 finally:
439 ui.flush()
437 ui.flush()
440 except: # re-raises
438 except: # re-raises
441 # enter the debugger when we hit an exception
439 # enter the debugger when we hit an exception
442 if req.earlyoptions[b'debugger']:
440 if req.earlyoptions[b'debugger']:
443 traceback.print_exc()
441 traceback.print_exc()
444 debugmortem[debugger](sys.exc_info()[2])
442 debugmortem[debugger](sys.exc_info()[2])
445 raise
443 raise
446
444
447 return _callcatch(ui, _runcatchfunc)
445 return _callcatch(ui, _runcatchfunc)
448
446
449
447
450 def _callcatch(ui, func):
448 def _callcatch(ui, func):
451 """like scmutil.callcatch but handles more high-level exceptions about
449 """like scmutil.callcatch but handles more high-level exceptions about
452 config parsing and commands. besides, use handlecommandexception to handle
450 config parsing and commands. besides, use handlecommandexception to handle
453 uncaught exceptions.
451 uncaught exceptions.
454 """
452 """
455 try:
453 try:
456 return scmutil.callcatch(ui, func)
454 return scmutil.callcatch(ui, func)
457 except error.AmbiguousCommand as inst:
455 except error.AmbiguousCommand as inst:
458 ui.warn(
456 ui.warn(
459 _(b"hg: command '%s' is ambiguous:\n %s\n")
457 _(b"hg: command '%s' is ambiguous:\n %s\n")
460 % (inst.prefix, b" ".join(inst.matches))
458 % (inst.prefix, b" ".join(inst.matches))
461 )
459 )
462 except error.CommandError as inst:
460 except error.CommandError as inst:
463 if inst.command:
461 if inst.command:
464 ui.pager(b'help')
462 ui.pager(b'help')
465 msgbytes = pycompat.bytestr(inst.message)
463 msgbytes = pycompat.bytestr(inst.message)
466 ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes))
464 ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes))
467 commands.help_(ui, inst.command, full=False, command=True)
465 commands.help_(ui, inst.command, full=False, command=True)
468 else:
466 else:
469 ui.warn(_(b"hg: %s\n") % inst.message)
467 ui.warn(_(b"hg: %s\n") % inst.message)
470 ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
468 ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
471 except error.ParseError as inst:
469 except error.ParseError as inst:
472 ui.warn(inst.format())
470 ui.warn(inst.format())
473 return -1
471 return -1
474 except error.UnknownCommand as inst:
472 except error.UnknownCommand as inst:
475 nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command
473 nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command
476 try:
474 try:
477 # check if the command is in a disabled extension
475 # check if the command is in a disabled extension
478 # (but don't check for extensions themselves)
476 # (but don't check for extensions themselves)
479 formatted = help.formattedhelp(
477 formatted = help.formattedhelp(
480 ui, commands, inst.command, unknowncmd=True
478 ui, commands, inst.command, unknowncmd=True
481 )
479 )
482 ui.warn(nocmdmsg)
480 ui.warn(nocmdmsg)
483 ui.write(formatted)
481 ui.write(formatted)
484 except (error.UnknownCommand, error.Abort):
482 except (error.UnknownCommand, error.Abort):
485 suggested = False
483 suggested = False
486 if inst.all_commands:
484 if inst.all_commands:
487 sim = error.getsimilar(inst.all_commands, inst.command)
485 sim = error.getsimilar(inst.all_commands, inst.command)
488 if sim:
486 if sim:
489 ui.warn(nocmdmsg)
487 ui.warn(nocmdmsg)
490 ui.warn(b"(%s)\n" % error.similarity_hint(sim))
488 ui.warn(b"(%s)\n" % error.similarity_hint(sim))
491 suggested = True
489 suggested = True
492 if not suggested:
490 if not suggested:
493 ui.warn(nocmdmsg)
491 ui.warn(nocmdmsg)
494 ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
492 ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
495 except IOError:
493 except IOError:
496 raise
494 raise
497 except KeyboardInterrupt:
495 except KeyboardInterrupt:
498 raise
496 raise
499 except: # probably re-raises
497 except: # probably re-raises
500 if not handlecommandexception(ui):
498 if not handlecommandexception(ui):
501 raise
499 raise
502
500
503 return -1
501 return -1
504
502
505
503
506 def aliasargs(fn, givenargs):
504 def aliasargs(fn, givenargs):
507 args = []
505 args = []
508 # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
506 # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
509 if not util.safehasattr(fn, b'_origfunc'):
507 if not util.safehasattr(fn, b'_origfunc'):
510 args = getattr(fn, 'args', args)
508 args = getattr(fn, 'args', args)
511 if args:
509 if args:
512 cmd = b' '.join(map(procutil.shellquote, args))
510 cmd = b' '.join(map(procutil.shellquote, args))
513
511
514 nums = []
512 nums = []
515
513
516 def replacer(m):
514 def replacer(m):
517 num = int(m.group(1)) - 1
515 num = int(m.group(1)) - 1
518 nums.append(num)
516 nums.append(num)
519 if num < len(givenargs):
517 if num < len(givenargs):
520 return givenargs[num]
518 return givenargs[num]
521 raise error.Abort(_(b'too few arguments for command alias'))
519 raise error.Abort(_(b'too few arguments for command alias'))
522
520
523 cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
521 cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
524 givenargs = [x for i, x in enumerate(givenargs) if i not in nums]
522 givenargs = [x for i, x in enumerate(givenargs) if i not in nums]
525 args = pycompat.shlexsplit(cmd)
523 args = pycompat.shlexsplit(cmd)
526 return args + givenargs
524 return args + givenargs
527
525
528
526
529 def aliasinterpolate(name, args, cmd):
527 def aliasinterpolate(name, args, cmd):
530 '''interpolate args into cmd for shell aliases
528 '''interpolate args into cmd for shell aliases
531
529
532 This also handles $0, $@ and "$@".
530 This also handles $0, $@ and "$@".
533 '''
531 '''
534 # util.interpolate can't deal with "$@" (with quotes) because it's only
532 # util.interpolate can't deal with "$@" (with quotes) because it's only
535 # built to match prefix + patterns.
533 # built to match prefix + patterns.
536 replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)}
534 replacemap = {b'$%d' % (i + 1): arg for i, arg in enumerate(args)}
537 replacemap[b'$0'] = name
535 replacemap[b'$0'] = name
538 replacemap[b'$$'] = b'$'
536 replacemap[b'$$'] = b'$'
539 replacemap[b'$@'] = b' '.join(args)
537 replacemap[b'$@'] = b' '.join(args)
540 # Typical Unix shells interpolate "$@" (with quotes) as all the positional
538 # Typical Unix shells interpolate "$@" (with quotes) as all the positional
541 # parameters, separated out into words. Emulate the same behavior here by
539 # parameters, separated out into words. Emulate the same behavior here by
542 # quoting the arguments individually. POSIX shells will then typically
540 # quoting the arguments individually. POSIX shells will then typically
543 # tokenize each argument into exactly one word.
541 # tokenize each argument into exactly one word.
544 replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args)
542 replacemap[b'"$@"'] = b' '.join(procutil.shellquote(arg) for arg in args)
545 # escape '\$' for regex
543 # escape '\$' for regex
546 regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$')
544 regex = b'|'.join(replacemap.keys()).replace(b'$', br'\$')
547 r = re.compile(regex)
545 r = re.compile(regex)
548 return r.sub(lambda x: replacemap[x.group()], cmd)
546 return r.sub(lambda x: replacemap[x.group()], cmd)
549
547
550
548
551 class cmdalias(object):
549 class cmdalias(object):
552 def __init__(self, ui, name, definition, cmdtable, source):
550 def __init__(self, ui, name, definition, cmdtable, source):
553 self.name = self.cmd = name
551 self.name = self.cmd = name
554 self.cmdname = b''
552 self.cmdname = b''
555 self.definition = definition
553 self.definition = definition
556 self.fn = None
554 self.fn = None
557 self.givenargs = []
555 self.givenargs = []
558 self.opts = []
556 self.opts = []
559 self.help = b''
557 self.help = b''
560 self.badalias = None
558 self.badalias = None
561 self.unknowncmd = False
559 self.unknowncmd = False
562 self.source = source
560 self.source = source
563
561
564 try:
562 try:
565 aliases, entry = cmdutil.findcmd(self.name, cmdtable)
563 aliases, entry = cmdutil.findcmd(self.name, cmdtable)
566 for alias, e in pycompat.iteritems(cmdtable):
564 for alias, e in pycompat.iteritems(cmdtable):
567 if e is entry:
565 if e is entry:
568 self.cmd = alias
566 self.cmd = alias
569 break
567 break
570 self.shadows = True
568 self.shadows = True
571 except error.UnknownCommand:
569 except error.UnknownCommand:
572 self.shadows = False
570 self.shadows = False
573
571
574 if not self.definition:
572 if not self.definition:
575 self.badalias = _(b"no definition for alias '%s'") % self.name
573 self.badalias = _(b"no definition for alias '%s'") % self.name
576 return
574 return
577
575
578 if self.definition.startswith(b'!'):
576 if self.definition.startswith(b'!'):
579 shdef = self.definition[1:]
577 shdef = self.definition[1:]
580 self.shell = True
578 self.shell = True
581
579
582 def fn(ui, *args):
580 def fn(ui, *args):
583 env = {b'HG_ARGS': b' '.join((self.name,) + args)}
581 env = {b'HG_ARGS': b' '.join((self.name,) + args)}
584
582
585 def _checkvar(m):
583 def _checkvar(m):
586 if m.groups()[0] == b'$':
584 if m.groups()[0] == b'$':
587 return m.group()
585 return m.group()
588 elif int(m.groups()[0]) <= len(args):
586 elif int(m.groups()[0]) <= len(args):
589 return m.group()
587 return m.group()
590 else:
588 else:
591 ui.debug(
589 ui.debug(
592 b"No argument found for substitution "
590 b"No argument found for substitution "
593 b"of %i variable in alias '%s' definition.\n"
591 b"of %i variable in alias '%s' definition.\n"
594 % (int(m.groups()[0]), self.name)
592 % (int(m.groups()[0]), self.name)
595 )
593 )
596 return b''
594 return b''
597
595
598 cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
596 cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
599 cmd = aliasinterpolate(self.name, args, cmd)
597 cmd = aliasinterpolate(self.name, args, cmd)
600 return ui.system(
598 return ui.system(
601 cmd, environ=env, blockedtag=b'alias_%s' % self.name
599 cmd, environ=env, blockedtag=b'alias_%s' % self.name
602 )
600 )
603
601
604 self.fn = fn
602 self.fn = fn
605 self.alias = True
603 self.alias = True
606 self._populatehelp(ui, name, shdef, self.fn)
604 self._populatehelp(ui, name, shdef, self.fn)
607 return
605 return
608
606
609 try:
607 try:
610 args = pycompat.shlexsplit(self.definition)
608 args = pycompat.shlexsplit(self.definition)
611 except ValueError as inst:
609 except ValueError as inst:
612 self.badalias = _(b"error in definition for alias '%s': %s") % (
610 self.badalias = _(b"error in definition for alias '%s': %s") % (
613 self.name,
611 self.name,
614 stringutil.forcebytestr(inst),
612 stringutil.forcebytestr(inst),
615 )
613 )
616 return
614 return
617 earlyopts, args = _earlysplitopts(args)
615 earlyopts, args = _earlysplitopts(args)
618 if earlyopts:
616 if earlyopts:
619 self.badalias = _(
617 self.badalias = _(
620 b"error in definition for alias '%s': %s may "
618 b"error in definition for alias '%s': %s may "
621 b"only be given on the command line"
619 b"only be given on the command line"
622 ) % (self.name, b'/'.join(pycompat.ziplist(*earlyopts)[0]))
620 ) % (self.name, b'/'.join(pycompat.ziplist(*earlyopts)[0]))
623 return
621 return
624 self.cmdname = cmd = args.pop(0)
622 self.cmdname = cmd = args.pop(0)
625 self.givenargs = args
623 self.givenargs = args
626
624
627 try:
625 try:
628 tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
626 tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
629 if len(tableentry) > 2:
627 if len(tableentry) > 2:
630 self.fn, self.opts, cmdhelp = tableentry
628 self.fn, self.opts, cmdhelp = tableentry
631 else:
629 else:
632 self.fn, self.opts = tableentry
630 self.fn, self.opts = tableentry
633 cmdhelp = None
631 cmdhelp = None
634
632
635 self.alias = True
633 self.alias = True
636 self._populatehelp(ui, name, cmd, self.fn, cmdhelp)
634 self._populatehelp(ui, name, cmd, self.fn, cmdhelp)
637
635
638 except error.UnknownCommand:
636 except error.UnknownCommand:
639 self.badalias = _(
637 self.badalias = _(
640 b"alias '%s' resolves to unknown command '%s'"
638 b"alias '%s' resolves to unknown command '%s'"
641 ) % (self.name, cmd,)
639 ) % (self.name, cmd,)
642 self.unknowncmd = True
640 self.unknowncmd = True
643 except error.AmbiguousCommand:
641 except error.AmbiguousCommand:
644 self.badalias = _(
642 self.badalias = _(
645 b"alias '%s' resolves to ambiguous command '%s'"
643 b"alias '%s' resolves to ambiguous command '%s'"
646 ) % (self.name, cmd,)
644 ) % (self.name, cmd,)
647
645
648 def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
646 def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
649 # confine strings to be passed to i18n.gettext()
647 # confine strings to be passed to i18n.gettext()
650 cfg = {}
648 cfg = {}
651 for k in (b'doc', b'help', b'category'):
649 for k in (b'doc', b'help', b'category'):
652 v = ui.config(b'alias', b'%s:%s' % (name, k), None)
650 v = ui.config(b'alias', b'%s:%s' % (name, k), None)
653 if v is None:
651 if v is None:
654 continue
652 continue
655 if not encoding.isasciistr(v):
653 if not encoding.isasciistr(v):
656 self.badalias = _(
654 self.badalias = _(
657 b"non-ASCII character in alias definition '%s:%s'"
655 b"non-ASCII character in alias definition '%s:%s'"
658 ) % (name, k)
656 ) % (name, k)
659 return
657 return
660 cfg[k] = v
658 cfg[k] = v
661
659
662 self.help = cfg.get(b'help', defaulthelp or b'')
660 self.help = cfg.get(b'help', defaulthelp or b'')
663 if self.help and self.help.startswith(b"hg " + cmd):
661 if self.help and self.help.startswith(b"hg " + cmd):
664 # drop prefix in old-style help lines so hg shows the alias
662 # drop prefix in old-style help lines so hg shows the alias
665 self.help = self.help[4 + len(cmd) :]
663 self.help = self.help[4 + len(cmd) :]
666
664
667 self.owndoc = b'doc' in cfg
665 self.owndoc = b'doc' in cfg
668 doc = cfg.get(b'doc', pycompat.getdoc(fn))
666 doc = cfg.get(b'doc', pycompat.getdoc(fn))
669 if doc is not None:
667 if doc is not None:
670 doc = pycompat.sysstr(doc)
668 doc = pycompat.sysstr(doc)
671 self.__doc__ = doc
669 self.__doc__ = doc
672
670
673 self.helpcategory = cfg.get(
671 self.helpcategory = cfg.get(
674 b'category', registrar.command.CATEGORY_NONE
672 b'category', registrar.command.CATEGORY_NONE
675 )
673 )
676
674
677 @property
675 @property
678 def args(self):
676 def args(self):
679 args = pycompat.maplist(util.expandpath, self.givenargs)
677 args = pycompat.maplist(util.expandpath, self.givenargs)
680 return aliasargs(self.fn, args)
678 return aliasargs(self.fn, args)
681
679
682 def __getattr__(self, name):
680 def __getattr__(self, name):
683 adefaults = {
681 adefaults = {
684 'norepo': True,
682 'norepo': True,
685 'intents': set(),
683 'intents': set(),
686 'optionalrepo': False,
684 'optionalrepo': False,
687 'inferrepo': False,
685 'inferrepo': False,
688 }
686 }
689 if name not in adefaults:
687 if name not in adefaults:
690 raise AttributeError(name)
688 raise AttributeError(name)
691 if self.badalias or util.safehasattr(self, b'shell'):
689 if self.badalias or util.safehasattr(self, b'shell'):
692 return adefaults[name]
690 return adefaults[name]
693 return getattr(self.fn, name)
691 return getattr(self.fn, name)
694
692
695 def __call__(self, ui, *args, **opts):
693 def __call__(self, ui, *args, **opts):
696 if self.badalias:
694 if self.badalias:
697 hint = None
695 hint = None
698 if self.unknowncmd:
696 if self.unknowncmd:
699 try:
697 try:
700 # check if the command is in a disabled extension
698 # check if the command is in a disabled extension
701 cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
699 cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
702 hint = _(b"'%s' is provided by '%s' extension") % (cmd, ext)
700 hint = _(b"'%s' is provided by '%s' extension") % (cmd, ext)
703 except error.UnknownCommand:
701 except error.UnknownCommand:
704 pass
702 pass
705 raise error.Abort(self.badalias, hint=hint)
703 raise error.Abort(self.badalias, hint=hint)
706 if self.shadows:
704 if self.shadows:
707 ui.debug(
705 ui.debug(
708 b"alias '%s' shadows command '%s'\n" % (self.name, self.cmdname)
706 b"alias '%s' shadows command '%s'\n" % (self.name, self.cmdname)
709 )
707 )
710
708
711 ui.log(
709 ui.log(
712 b'commandalias',
710 b'commandalias',
713 b"alias '%s' expands to '%s'\n",
711 b"alias '%s' expands to '%s'\n",
714 self.name,
712 self.name,
715 self.definition,
713 self.definition,
716 )
714 )
717 if util.safehasattr(self, b'shell'):
715 if util.safehasattr(self, b'shell'):
718 return self.fn(ui, *args, **opts)
716 return self.fn(ui, *args, **opts)
719 else:
717 else:
720 try:
718 try:
721 return util.checksignature(self.fn)(ui, *args, **opts)
719 return util.checksignature(self.fn)(ui, *args, **opts)
722 except error.SignatureError:
720 except error.SignatureError:
723 args = b' '.join([self.cmdname] + self.args)
721 args = b' '.join([self.cmdname] + self.args)
724 ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args))
722 ui.debug(b"alias '%s' expands to '%s'\n" % (self.name, args))
725 raise
723 raise
726
724
727
725
728 class lazyaliasentry(object):
726 class lazyaliasentry(object):
729 """like a typical command entry (func, opts, help), but is lazy"""
727 """like a typical command entry (func, opts, help), but is lazy"""
730
728
731 def __init__(self, ui, name, definition, cmdtable, source):
729 def __init__(self, ui, name, definition, cmdtable, source):
732 self.ui = ui
730 self.ui = ui
733 self.name = name
731 self.name = name
734 self.definition = definition
732 self.definition = definition
735 self.cmdtable = cmdtable.copy()
733 self.cmdtable = cmdtable.copy()
736 self.source = source
734 self.source = source
737 self.alias = True
735 self.alias = True
738
736
739 @util.propertycache
737 @util.propertycache
740 def _aliasdef(self):
738 def _aliasdef(self):
741 return cmdalias(
739 return cmdalias(
742 self.ui, self.name, self.definition, self.cmdtable, self.source
740 self.ui, self.name, self.definition, self.cmdtable, self.source
743 )
741 )
744
742
745 def __getitem__(self, n):
743 def __getitem__(self, n):
746 aliasdef = self._aliasdef
744 aliasdef = self._aliasdef
747 if n == 0:
745 if n == 0:
748 return aliasdef
746 return aliasdef
749 elif n == 1:
747 elif n == 1:
750 return aliasdef.opts
748 return aliasdef.opts
751 elif n == 2:
749 elif n == 2:
752 return aliasdef.help
750 return aliasdef.help
753 else:
751 else:
754 raise IndexError
752 raise IndexError
755
753
756 def __iter__(self):
754 def __iter__(self):
757 for i in range(3):
755 for i in range(3):
758 yield self[i]
756 yield self[i]
759
757
760 def __len__(self):
758 def __len__(self):
761 return 3
759 return 3
762
760
763
761
764 def addaliases(ui, cmdtable):
762 def addaliases(ui, cmdtable):
765 # aliases are processed after extensions have been loaded, so they
763 # aliases are processed after extensions have been loaded, so they
766 # may use extension commands. Aliases can also use other alias definitions,
764 # may use extension commands. Aliases can also use other alias definitions,
767 # but only if they have been defined prior to the current definition.
765 # but only if they have been defined prior to the current definition.
768 for alias, definition in ui.configitems(b'alias', ignoresub=True):
766 for alias, definition in ui.configitems(b'alias', ignoresub=True):
769 try:
767 try:
770 if cmdtable[alias].definition == definition:
768 if cmdtable[alias].definition == definition:
771 continue
769 continue
772 except (KeyError, AttributeError):
770 except (KeyError, AttributeError):
773 # definition might not exist or it might not be a cmdalias
771 # definition might not exist or it might not be a cmdalias
774 pass
772 pass
775
773
776 source = ui.configsource(b'alias', alias)
774 source = ui.configsource(b'alias', alias)
777 entry = lazyaliasentry(ui, alias, definition, cmdtable, source)
775 entry = lazyaliasentry(ui, alias, definition, cmdtable, source)
778 cmdtable[alias] = entry
776 cmdtable[alias] = entry
779
777
780
778
781 def _parse(ui, args):
779 def _parse(ui, args):
782 options = {}
780 options = {}
783 cmdoptions = {}
781 cmdoptions = {}
784
782
785 try:
783 try:
786 args = fancyopts.fancyopts(args, commands.globalopts, options)
784 args = fancyopts.fancyopts(args, commands.globalopts, options)
787 except getopt.GetoptError as inst:
785 except getopt.GetoptError as inst:
788 raise error.CommandError(None, stringutil.forcebytestr(inst))
786 raise error.CommandError(None, stringutil.forcebytestr(inst))
789
787
790 if args:
788 if args:
791 cmd, args = args[0], args[1:]
789 cmd, args = args[0], args[1:]
792 aliases, entry = cmdutil.findcmd(
790 aliases, entry = cmdutil.findcmd(
793 cmd, commands.table, ui.configbool(b"ui", b"strict")
791 cmd, commands.table, ui.configbool(b"ui", b"strict")
794 )
792 )
795 cmd = aliases[0]
793 cmd = aliases[0]
796 args = aliasargs(entry[0], args)
794 args = aliasargs(entry[0], args)
797 defaults = ui.config(b"defaults", cmd)
795 defaults = ui.config(b"defaults", cmd)
798 if defaults:
796 if defaults:
799 args = (
797 args = (
800 pycompat.maplist(util.expandpath, pycompat.shlexsplit(defaults))
798 pycompat.maplist(util.expandpath, pycompat.shlexsplit(defaults))
801 + args
799 + args
802 )
800 )
803 c = list(entry[1])
801 c = list(entry[1])
804 else:
802 else:
805 cmd = None
803 cmd = None
806 c = []
804 c = []
807
805
808 # combine global options into local
806 # combine global options into local
809 for o in commands.globalopts:
807 for o in commands.globalopts:
810 c.append((o[0], o[1], options[o[1]], o[3]))
808 c.append((o[0], o[1], options[o[1]], o[3]))
811
809
812 try:
810 try:
813 args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
811 args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
814 except getopt.GetoptError as inst:
812 except getopt.GetoptError as inst:
815 raise error.CommandError(cmd, stringutil.forcebytestr(inst))
813 raise error.CommandError(cmd, stringutil.forcebytestr(inst))
816
814
817 # separate global options back out
815 # separate global options back out
818 for o in commands.globalopts:
816 for o in commands.globalopts:
819 n = o[1]
817 n = o[1]
820 options[n] = cmdoptions[n]
818 options[n] = cmdoptions[n]
821 del cmdoptions[n]
819 del cmdoptions[n]
822
820
823 return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
821 return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
824
822
825
823
826 def _parseconfig(ui, config):
824 def _parseconfig(ui, config):
827 """parse the --config options from the command line"""
825 """parse the --config options from the command line"""
828 configs = []
826 configs = []
829
827
830 for cfg in config:
828 for cfg in config:
831 try:
829 try:
832 name, value = [cfgelem.strip() for cfgelem in cfg.split(b'=', 1)]
830 name, value = [cfgelem.strip() for cfgelem in cfg.split(b'=', 1)]
833 section, name = name.split(b'.', 1)
831 section, name = name.split(b'.', 1)
834 if not section or not name:
832 if not section or not name:
835 raise IndexError
833 raise IndexError
836 ui.setconfig(section, name, value, b'--config')
834 ui.setconfig(section, name, value, b'--config')
837 configs.append((section, name, value))
835 configs.append((section, name, value))
838 except (IndexError, ValueError):
836 except (IndexError, ValueError):
839 raise error.Abort(
837 raise error.Abort(
840 _(
838 _(
841 b'malformed --config option: %r '
839 b'malformed --config option: %r '
842 b'(use --config section.name=value)'
840 b'(use --config section.name=value)'
843 )
841 )
844 % pycompat.bytestr(cfg)
842 % pycompat.bytestr(cfg)
845 )
843 )
846
844
847 return configs
845 return configs
848
846
849
847
850 def _earlyparseopts(ui, args):
848 def _earlyparseopts(ui, args):
851 options = {}
849 options = {}
852 fancyopts.fancyopts(
850 fancyopts.fancyopts(
853 args,
851 args,
854 commands.globalopts,
852 commands.globalopts,
855 options,
853 options,
856 gnu=not ui.plain(b'strictflags'),
854 gnu=not ui.plain(b'strictflags'),
857 early=True,
855 early=True,
858 optaliases={b'repository': [b'repo']},
856 optaliases={b'repository': [b'repo']},
859 )
857 )
860 return options
858 return options
861
859
862
860
863 def _earlysplitopts(args):
861 def _earlysplitopts(args):
864 """Split args into a list of possible early options and remainder args"""
862 """Split args into a list of possible early options and remainder args"""
865 shortoptions = b'R:'
863 shortoptions = b'R:'
866 # TODO: perhaps 'debugger' should be included
864 # TODO: perhaps 'debugger' should be included
867 longoptions = [b'cwd=', b'repository=', b'repo=', b'config=']
865 longoptions = [b'cwd=', b'repository=', b'repo=', b'config=']
868 return fancyopts.earlygetopt(
866 return fancyopts.earlygetopt(
869 args, shortoptions, longoptions, gnu=True, keepsep=True
867 args, shortoptions, longoptions, gnu=True, keepsep=True
870 )
868 )
871
869
872
870
873 def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
871 def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
874 # run pre-hook, and abort if it fails
872 # run pre-hook, and abort if it fails
875 hook.hook(
873 hook.hook(
876 lui,
874 lui,
877 repo,
875 repo,
878 b"pre-%s" % cmd,
876 b"pre-%s" % cmd,
879 True,
877 True,
880 args=b" ".join(fullargs),
878 args=b" ".join(fullargs),
881 pats=cmdpats,
879 pats=cmdpats,
882 opts=cmdoptions,
880 opts=cmdoptions,
883 )
881 )
884 try:
882 try:
885 ret = _runcommand(ui, options, cmd, d)
883 ret = _runcommand(ui, options, cmd, d)
886 # run post-hook, passing command result
884 # run post-hook, passing command result
887 hook.hook(
885 hook.hook(
888 lui,
886 lui,
889 repo,
887 repo,
890 b"post-%s" % cmd,
888 b"post-%s" % cmd,
891 False,
889 False,
892 args=b" ".join(fullargs),
890 args=b" ".join(fullargs),
893 result=ret,
891 result=ret,
894 pats=cmdpats,
892 pats=cmdpats,
895 opts=cmdoptions,
893 opts=cmdoptions,
896 )
894 )
897 except Exception:
895 except Exception:
898 # run failure hook and re-raise
896 # run failure hook and re-raise
899 hook.hook(
897 hook.hook(
900 lui,
898 lui,
901 repo,
899 repo,
902 b"fail-%s" % cmd,
900 b"fail-%s" % cmd,
903 False,
901 False,
904 args=b" ".join(fullargs),
902 args=b" ".join(fullargs),
905 pats=cmdpats,
903 pats=cmdpats,
906 opts=cmdoptions,
904 opts=cmdoptions,
907 )
905 )
908 raise
906 raise
909 return ret
907 return ret
910
908
911
909
912 def _readsharedsourceconfig(ui, path):
910 def _readsharedsourceconfig(ui, path):
913 """if the current repository is shared one, this tries to read
911 """if the current repository is shared one, this tries to read
914 .hg/hgrc of shared source if we are in share-safe mode
912 .hg/hgrc of shared source if we are in share-safe mode
915
913
916 Config read is loaded into the ui object passed
914 Config read is loaded into the ui object passed
917
915
918 This should be called before reading .hg/hgrc or the main repo
916 This should be called before reading .hg/hgrc or the main repo
919 as that overrides config set in shared source"""
917 as that overrides config set in shared source"""
920 try:
918 try:
921 with open(os.path.join(path, b".hg", b"requires"), "rb") as fp:
919 with open(os.path.join(path, b".hg", b"requires"), "rb") as fp:
922 requirements = set(fp.read().splitlines())
920 requirements = set(fp.read().splitlines())
923 if not (
921 if not (
924 requirementsmod.SHARESAFE_REQUIREMENT in requirements
922 requirementsmod.SHARESAFE_REQUIREMENT in requirements
925 and requirementsmod.SHARED_REQUIREMENT in requirements
923 and requirementsmod.SHARED_REQUIREMENT in requirements
926 ):
924 ):
927 return
925 return
928 hgvfs = vfs.vfs(os.path.join(path, b".hg"))
926 hgvfs = vfs.vfs(os.path.join(path, b".hg"))
929 sharedvfs = localrepo._getsharedvfs(hgvfs, requirements)
927 sharedvfs = localrepo._getsharedvfs(hgvfs, requirements)
930 ui.readconfig(sharedvfs.join(b"hgrc"), path)
928 ui.readconfig(sharedvfs.join(b"hgrc"), path)
931 except IOError:
929 except IOError:
932 pass
930 pass
933
931
934
932
935 def _getlocal(ui, rpath, wd=None):
933 def _getlocal(ui, rpath, wd=None):
936 """Return (path, local ui object) for the given target path.
934 """Return (path, local ui object) for the given target path.
937
935
938 Takes paths in [cwd]/.hg/hgrc into account."
936 Takes paths in [cwd]/.hg/hgrc into account."
939 """
937 """
940 if wd is None:
938 if wd is None:
941 try:
939 try:
942 wd = encoding.getcwd()
940 wd = encoding.getcwd()
943 except OSError as e:
941 except OSError as e:
944 raise error.Abort(
942 raise error.Abort(
945 _(b"error getting current working directory: %s")
943 _(b"error getting current working directory: %s")
946 % encoding.strtolocal(e.strerror)
944 % encoding.strtolocal(e.strerror)
947 )
945 )
948
946
949 path = cmdutil.findrepo(wd) or b""
947 path = cmdutil.findrepo(wd) or b""
950 if not path:
948 if not path:
951 lui = ui
949 lui = ui
952 else:
950 else:
953 lui = ui.copy()
951 lui = ui.copy()
954 if rcutil.use_repo_hgrc():
952 if rcutil.use_repo_hgrc():
955 _readsharedsourceconfig(lui, path)
953 _readsharedsourceconfig(lui, path)
956 lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)
954 lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)
957 lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
955 lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
958
956
959 if rpath:
957 if rpath:
960 path = lui.expandpath(rpath)
958 path = lui.expandpath(rpath)
961 lui = ui.copy()
959 lui = ui.copy()
962 if rcutil.use_repo_hgrc():
960 if rcutil.use_repo_hgrc():
963 _readsharedsourceconfig(lui, path)
961 _readsharedsourceconfig(lui, path)
964 lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)
962 lui.readconfig(os.path.join(path, b".hg", b"hgrc"), path)
965 lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
963 lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
966
964
967 return path, lui
965 return path, lui
968
966
969
967
970 def _checkshellalias(lui, ui, args):
968 def _checkshellalias(lui, ui, args):
971 """Return the function to run the shell alias, if it is required"""
969 """Return the function to run the shell alias, if it is required"""
972 options = {}
970 options = {}
973
971
974 try:
972 try:
975 args = fancyopts.fancyopts(args, commands.globalopts, options)
973 args = fancyopts.fancyopts(args, commands.globalopts, options)
976 except getopt.GetoptError:
974 except getopt.GetoptError:
977 return
975 return
978
976
979 if not args:
977 if not args:
980 return
978 return
981
979
982 cmdtable = commands.table
980 cmdtable = commands.table
983
981
984 cmd = args[0]
982 cmd = args[0]
985 try:
983 try:
986 strict = ui.configbool(b"ui", b"strict")
984 strict = ui.configbool(b"ui", b"strict")
987 aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
985 aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
988 except (error.AmbiguousCommand, error.UnknownCommand):
986 except (error.AmbiguousCommand, error.UnknownCommand):
989 return
987 return
990
988
991 cmd = aliases[0]
989 cmd = aliases[0]
992 fn = entry[0]
990 fn = entry[0]
993
991
994 if cmd and util.safehasattr(fn, b'shell'):
992 if cmd and util.safehasattr(fn, b'shell'):
995 # shell alias shouldn't receive early options which are consumed by hg
993 # shell alias shouldn't receive early options which are consumed by hg
996 _earlyopts, args = _earlysplitopts(args)
994 _earlyopts, args = _earlysplitopts(args)
997 d = lambda: fn(ui, *args[1:])
995 d = lambda: fn(ui, *args[1:])
998 return lambda: runcommand(
996 return lambda: runcommand(
999 lui, None, cmd, args[:1], ui, options, d, [], {}
997 lui, None, cmd, args[:1], ui, options, d, [], {}
1000 )
998 )
1001
999
1002
1000
1003 def _dispatch(req):
1001 def _dispatch(req):
1004 args = req.args
1002 args = req.args
1005 ui = req.ui
1003 ui = req.ui
1006
1004
1007 # check for cwd
1005 # check for cwd
1008 cwd = req.earlyoptions[b'cwd']
1006 cwd = req.earlyoptions[b'cwd']
1009 if cwd:
1007 if cwd:
1010 os.chdir(cwd)
1008 os.chdir(cwd)
1011
1009
1012 rpath = req.earlyoptions[b'repository']
1010 rpath = req.earlyoptions[b'repository']
1013 path, lui = _getlocal(ui, rpath)
1011 path, lui = _getlocal(ui, rpath)
1014
1012
1015 uis = {ui, lui}
1013 uis = {ui, lui}
1016
1014
1017 if req.repo:
1015 if req.repo:
1018 uis.add(req.repo.ui)
1016 uis.add(req.repo.ui)
1019
1017
1020 if (
1018 if (
1021 req.earlyoptions[b'verbose']
1019 req.earlyoptions[b'verbose']
1022 or req.earlyoptions[b'debug']
1020 or req.earlyoptions[b'debug']
1023 or req.earlyoptions[b'quiet']
1021 or req.earlyoptions[b'quiet']
1024 ):
1022 ):
1025 for opt in (b'verbose', b'debug', b'quiet'):
1023 for opt in (b'verbose', b'debug', b'quiet'):
1026 val = pycompat.bytestr(bool(req.earlyoptions[opt]))
1024 val = pycompat.bytestr(bool(req.earlyoptions[opt]))
1027 for ui_ in uis:
1025 for ui_ in uis:
1028 ui_.setconfig(b'ui', opt, val, b'--' + opt)
1026 ui_.setconfig(b'ui', opt, val, b'--' + opt)
1029
1027
1030 if req.earlyoptions[b'profile']:
1028 if req.earlyoptions[b'profile']:
1031 for ui_ in uis:
1029 for ui_ in uis:
1032 ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
1030 ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
1033
1031
1034 profile = lui.configbool(b'profiling', b'enabled')
1032 profile = lui.configbool(b'profiling', b'enabled')
1035 with profiling.profile(lui, enabled=profile) as profiler:
1033 with profiling.profile(lui, enabled=profile) as profiler:
1036 # Configure extensions in phases: uisetup, extsetup, cmdtable, and
1034 # Configure extensions in phases: uisetup, extsetup, cmdtable, and
1037 # reposetup
1035 # reposetup
1038 extensions.loadall(lui)
1036 extensions.loadall(lui)
1039 # Propagate any changes to lui.__class__ by extensions
1037 # Propagate any changes to lui.__class__ by extensions
1040 ui.__class__ = lui.__class__
1038 ui.__class__ = lui.__class__
1041
1039
1042 # (uisetup and extsetup are handled in extensions.loadall)
1040 # (uisetup and extsetup are handled in extensions.loadall)
1043
1041
1044 # (reposetup is handled in hg.repository)
1042 # (reposetup is handled in hg.repository)
1045
1043
1046 addaliases(lui, commands.table)
1044 addaliases(lui, commands.table)
1047
1045
1048 # All aliases and commands are completely defined, now.
1046 # All aliases and commands are completely defined, now.
1049 # Check abbreviation/ambiguity of shell alias.
1047 # Check abbreviation/ambiguity of shell alias.
1050 shellaliasfn = _checkshellalias(lui, ui, args)
1048 shellaliasfn = _checkshellalias(lui, ui, args)
1051 if shellaliasfn:
1049 if shellaliasfn:
1052 # no additional configs will be set, set up the ui instances
1050 # no additional configs will be set, set up the ui instances
1053 for ui_ in uis:
1051 for ui_ in uis:
1054 extensions.populateui(ui_)
1052 extensions.populateui(ui_)
1055 return shellaliasfn()
1053 return shellaliasfn()
1056
1054
1057 # check for fallback encoding
1055 # check for fallback encoding
1058 fallback = lui.config(b'ui', b'fallbackencoding')
1056 fallback = lui.config(b'ui', b'fallbackencoding')
1059 if fallback:
1057 if fallback:
1060 encoding.fallbackencoding = fallback
1058 encoding.fallbackencoding = fallback
1061
1059
1062 fullargs = args
1060 fullargs = args
1063 cmd, func, args, options, cmdoptions = _parse(lui, args)
1061 cmd, func, args, options, cmdoptions = _parse(lui, args)
1064
1062
1065 # store the canonical command name in request object for later access
1063 # store the canonical command name in request object for later access
1066 req.canonical_command = cmd
1064 req.canonical_command = cmd
1067
1065
1068 if options[b"config"] != req.earlyoptions[b"config"]:
1066 if options[b"config"] != req.earlyoptions[b"config"]:
1069 raise error.Abort(_(b"option --config may not be abbreviated!"))
1067 raise error.Abort(_(b"option --config may not be abbreviated!"))
1070 if options[b"cwd"] != req.earlyoptions[b"cwd"]:
1068 if options[b"cwd"] != req.earlyoptions[b"cwd"]:
1071 raise error.Abort(_(b"option --cwd may not be abbreviated!"))
1069 raise error.Abort(_(b"option --cwd may not be abbreviated!"))
1072 if options[b"repository"] != req.earlyoptions[b"repository"]:
1070 if options[b"repository"] != req.earlyoptions[b"repository"]:
1073 raise error.Abort(
1071 raise error.Abort(
1074 _(
1072 _(
1075 b"option -R has to be separated from other options (e.g. not "
1073 b"option -R has to be separated from other options (e.g. not "
1076 b"-qR) and --repository may only be abbreviated as --repo!"
1074 b"-qR) and --repository may only be abbreviated as --repo!"
1077 )
1075 )
1078 )
1076 )
1079 if options[b"debugger"] != req.earlyoptions[b"debugger"]:
1077 if options[b"debugger"] != req.earlyoptions[b"debugger"]:
1080 raise error.Abort(_(b"option --debugger may not be abbreviated!"))
1078 raise error.Abort(_(b"option --debugger may not be abbreviated!"))
1081 # don't validate --profile/--traceback, which can be enabled from now
1079 # don't validate --profile/--traceback, which can be enabled from now
1082
1080
1083 if options[b"encoding"]:
1081 if options[b"encoding"]:
1084 encoding.encoding = options[b"encoding"]
1082 encoding.encoding = options[b"encoding"]
1085 if options[b"encodingmode"]:
1083 if options[b"encodingmode"]:
1086 encoding.encodingmode = options[b"encodingmode"]
1084 encoding.encodingmode = options[b"encodingmode"]
1087 if options[b"time"]:
1085 if options[b"time"]:
1088
1086
1089 def get_times():
1087 def get_times():
1090 t = os.times()
1088 t = os.times()
1091 if t[4] == 0.0:
1089 if t[4] == 0.0:
1092 # Windows leaves this as zero, so use time.perf_counter()
1090 # Windows leaves this as zero, so use time.perf_counter()
1093 t = (t[0], t[1], t[2], t[3], util.timer())
1091 t = (t[0], t[1], t[2], t[3], util.timer())
1094 return t
1092 return t
1095
1093
1096 s = get_times()
1094 s = get_times()
1097
1095
1098 def print_time():
1096 def print_time():
1099 t = get_times()
1097 t = get_times()
1100 ui.warn(
1098 ui.warn(
1101 _(b"time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n")
1099 _(b"time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n")
1102 % (
1100 % (
1103 t[4] - s[4],
1101 t[4] - s[4],
1104 t[0] - s[0],
1102 t[0] - s[0],
1105 t[2] - s[2],
1103 t[2] - s[2],
1106 t[1] - s[1],
1104 t[1] - s[1],
1107 t[3] - s[3],
1105 t[3] - s[3],
1108 )
1106 )
1109 )
1107 )
1110
1108
1111 ui.atexit(print_time)
1109 ui.atexit(print_time)
1112 if options[b"profile"]:
1110 if options[b"profile"]:
1113 profiler.start()
1111 profiler.start()
1114
1112
1115 # if abbreviated version of this were used, take them in account, now
1113 # if abbreviated version of this were used, take them in account, now
1116 if options[b'verbose'] or options[b'debug'] or options[b'quiet']:
1114 if options[b'verbose'] or options[b'debug'] or options[b'quiet']:
1117 for opt in (b'verbose', b'debug', b'quiet'):
1115 for opt in (b'verbose', b'debug', b'quiet'):
1118 if options[opt] == req.earlyoptions[opt]:
1116 if options[opt] == req.earlyoptions[opt]:
1119 continue
1117 continue
1120 val = pycompat.bytestr(bool(options[opt]))
1118 val = pycompat.bytestr(bool(options[opt]))
1121 for ui_ in uis:
1119 for ui_ in uis:
1122 ui_.setconfig(b'ui', opt, val, b'--' + opt)
1120 ui_.setconfig(b'ui', opt, val, b'--' + opt)
1123
1121
1124 if options[b'traceback']:
1122 if options[b'traceback']:
1125 for ui_ in uis:
1123 for ui_ in uis:
1126 ui_.setconfig(b'ui', b'traceback', b'on', b'--traceback')
1124 ui_.setconfig(b'ui', b'traceback', b'on', b'--traceback')
1127
1125
1128 if options[b'noninteractive']:
1126 if options[b'noninteractive']:
1129 for ui_ in uis:
1127 for ui_ in uis:
1130 ui_.setconfig(b'ui', b'interactive', b'off', b'-y')
1128 ui_.setconfig(b'ui', b'interactive', b'off', b'-y')
1131
1129
1132 if cmdoptions.get(b'insecure', False):
1130 if cmdoptions.get(b'insecure', False):
1133 for ui_ in uis:
1131 for ui_ in uis:
1134 ui_.insecureconnections = True
1132 ui_.insecureconnections = True
1135
1133
1136 # setup color handling before pager, because setting up pager
1134 # setup color handling before pager, because setting up pager
1137 # might cause incorrect console information
1135 # might cause incorrect console information
1138 coloropt = options[b'color']
1136 coloropt = options[b'color']
1139 for ui_ in uis:
1137 for ui_ in uis:
1140 if coloropt:
1138 if coloropt:
1141 ui_.setconfig(b'ui', b'color', coloropt, b'--color')
1139 ui_.setconfig(b'ui', b'color', coloropt, b'--color')
1142 color.setup(ui_)
1140 color.setup(ui_)
1143
1141
1144 if stringutil.parsebool(options[b'pager']):
1142 if stringutil.parsebool(options[b'pager']):
1145 # ui.pager() expects 'internal-always-' prefix in this case
1143 # ui.pager() expects 'internal-always-' prefix in this case
1146 ui.pager(b'internal-always-' + cmd)
1144 ui.pager(b'internal-always-' + cmd)
1147 elif options[b'pager'] != b'auto':
1145 elif options[b'pager'] != b'auto':
1148 for ui_ in uis:
1146 for ui_ in uis:
1149 ui_.disablepager()
1147 ui_.disablepager()
1150
1148
1151 # configs are fully loaded, set up the ui instances
1149 # configs are fully loaded, set up the ui instances
1152 for ui_ in uis:
1150 for ui_ in uis:
1153 extensions.populateui(ui_)
1151 extensions.populateui(ui_)
1154
1152
1155 if options[b'version']:
1153 if options[b'version']:
1156 return commands.version_(ui)
1154 return commands.version_(ui)
1157 if options[b'help']:
1155 if options[b'help']:
1158 return commands.help_(ui, cmd, command=cmd is not None)
1156 return commands.help_(ui, cmd, command=cmd is not None)
1159 elif not cmd:
1157 elif not cmd:
1160 return commands.help_(ui, b'shortlist')
1158 return commands.help_(ui, b'shortlist')
1161
1159
1162 repo = None
1160 repo = None
1163 cmdpats = args[:]
1161 cmdpats = args[:]
1164 assert func is not None # help out pytype
1162 assert func is not None # help out pytype
1165 if not func.norepo:
1163 if not func.norepo:
1166 # use the repo from the request only if we don't have -R
1164 # use the repo from the request only if we don't have -R
1167 if not rpath and not cwd:
1165 if not rpath and not cwd:
1168 repo = req.repo
1166 repo = req.repo
1169
1167
1170 if repo:
1168 if repo:
1171 # set the descriptors of the repo ui to those of ui
1169 # set the descriptors of the repo ui to those of ui
1172 repo.ui.fin = ui.fin
1170 repo.ui.fin = ui.fin
1173 repo.ui.fout = ui.fout
1171 repo.ui.fout = ui.fout
1174 repo.ui.ferr = ui.ferr
1172 repo.ui.ferr = ui.ferr
1175 repo.ui.fmsg = ui.fmsg
1173 repo.ui.fmsg = ui.fmsg
1176 else:
1174 else:
1177 try:
1175 try:
1178 repo = hg.repository(
1176 repo = hg.repository(
1179 ui,
1177 ui,
1180 path=path,
1178 path=path,
1181 presetupfuncs=req.prereposetups,
1179 presetupfuncs=req.prereposetups,
1182 intents=func.intents,
1180 intents=func.intents,
1183 )
1181 )
1184 if not repo.local():
1182 if not repo.local():
1185 raise error.Abort(
1183 raise error.Abort(
1186 _(b"repository '%s' is not local") % path
1184 _(b"repository '%s' is not local") % path
1187 )
1185 )
1188 repo.ui.setconfig(
1186 repo.ui.setconfig(
1189 b"bundle", b"mainreporoot", repo.root, b'repo'
1187 b"bundle", b"mainreporoot", repo.root, b'repo'
1190 )
1188 )
1191 except error.RequirementError:
1189 except error.RequirementError:
1192 raise
1190 raise
1193 except error.RepoError:
1191 except error.RepoError:
1194 if rpath: # invalid -R path
1192 if rpath: # invalid -R path
1195 raise
1193 raise
1196 if not func.optionalrepo:
1194 if not func.optionalrepo:
1197 if func.inferrepo and args and not path:
1195 if func.inferrepo and args and not path:
1198 # try to infer -R from command args
1196 # try to infer -R from command args
1199 repos = pycompat.maplist(cmdutil.findrepo, args)
1197 repos = pycompat.maplist(cmdutil.findrepo, args)
1200 guess = repos[0]
1198 guess = repos[0]
1201 if guess and repos.count(guess) == len(repos):
1199 if guess and repos.count(guess) == len(repos):
1202 req.args = [b'--repository', guess] + fullargs
1200 req.args = [b'--repository', guess] + fullargs
1203 req.earlyoptions[b'repository'] = guess
1201 req.earlyoptions[b'repository'] = guess
1204 return _dispatch(req)
1202 return _dispatch(req)
1205 if not path:
1203 if not path:
1206 raise error.RepoError(
1204 raise error.RepoError(
1207 _(
1205 _(
1208 b"no repository found in"
1206 b"no repository found in"
1209 b" '%s' (.hg not found)"
1207 b" '%s' (.hg not found)"
1210 )
1208 )
1211 % encoding.getcwd()
1209 % encoding.getcwd()
1212 )
1210 )
1213 raise
1211 raise
1214 if repo:
1212 if repo:
1215 ui = repo.ui
1213 ui = repo.ui
1216 if options[b'hidden']:
1214 if options[b'hidden']:
1217 repo = repo.unfiltered()
1215 repo = repo.unfiltered()
1218 args.insert(0, repo)
1216 args.insert(0, repo)
1219 elif rpath:
1217 elif rpath:
1220 ui.warn(_(b"warning: --repository ignored\n"))
1218 ui.warn(_(b"warning: --repository ignored\n"))
1221
1219
1222 msg = _formatargs(fullargs)
1220 msg = _formatargs(fullargs)
1223 ui.log(b"command", b'%s\n', msg)
1221 ui.log(b"command", b'%s\n', msg)
1224 strcmdopt = pycompat.strkwargs(cmdoptions)
1222 strcmdopt = pycompat.strkwargs(cmdoptions)
1225 d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
1223 d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
1226 try:
1224 try:
1227 return runcommand(
1225 return runcommand(
1228 lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions
1226 lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions
1229 )
1227 )
1230 finally:
1228 finally:
1231 if repo and repo != req.repo:
1229 if repo and repo != req.repo:
1232 repo.close()
1230 repo.close()
1233
1231
1234
1232
1235 def _runcommand(ui, options, cmd, cmdfunc):
1233 def _runcommand(ui, options, cmd, cmdfunc):
1236 """Run a command function, possibly with profiling enabled."""
1234 """Run a command function, possibly with profiling enabled."""
1237 try:
1235 try:
1238 with tracing.log("Running %s command" % cmd):
1236 with tracing.log("Running %s command" % cmd):
1239 return cmdfunc()
1237 return cmdfunc()
1240 except error.SignatureError:
1238 except error.SignatureError:
1241 raise error.CommandError(cmd, _(b'invalid arguments'))
1239 raise error.CommandError(cmd, _(b'invalid arguments'))
1242
1240
1243
1241
1244 def _exceptionwarning(ui):
1242 def _exceptionwarning(ui):
1245 """Produce a warning message for the current active exception"""
1243 """Produce a warning message for the current active exception"""
1246
1244
1247 # For compatibility checking, we discard the portion of the hg
1245 # For compatibility checking, we discard the portion of the hg
1248 # version after the + on the assumption that if a "normal
1246 # version after the + on the assumption that if a "normal
1249 # user" is running a build with a + in it the packager
1247 # user" is running a build with a + in it the packager
1250 # probably built from fairly close to a tag and anyone with a
1248 # probably built from fairly close to a tag and anyone with a
1251 # 'make local' copy of hg (where the version number can be out
1249 # 'make local' copy of hg (where the version number can be out
1252 # of date) will be clueful enough to notice the implausible
1250 # of date) will be clueful enough to notice the implausible
1253 # version number and try updating.
1251 # version number and try updating.
1254 ct = util.versiontuple(n=2)
1252 ct = util.versiontuple(n=2)
1255 worst = None, ct, b''
1253 worst = None, ct, b''
1256 if ui.config(b'ui', b'supportcontact') is None:
1254 if ui.config(b'ui', b'supportcontact') is None:
1257 for name, mod in extensions.extensions():
1255 for name, mod in extensions.extensions():
1258 # 'testedwith' should be bytes, but not all extensions are ported
1256 # 'testedwith' should be bytes, but not all extensions are ported
1259 # to py3 and we don't want UnicodeException because of that.
1257 # to py3 and we don't want UnicodeException because of that.
1260 testedwith = stringutil.forcebytestr(
1258 testedwith = stringutil.forcebytestr(
1261 getattr(mod, 'testedwith', b'')
1259 getattr(mod, 'testedwith', b'')
1262 )
1260 )
1263 report = getattr(mod, 'buglink', _(b'the extension author.'))
1261 report = getattr(mod, 'buglink', _(b'the extension author.'))
1264 if not testedwith.strip():
1262 if not testedwith.strip():
1265 # We found an untested extension. It's likely the culprit.
1263 # We found an untested extension. It's likely the culprit.
1266 worst = name, b'unknown', report
1264 worst = name, b'unknown', report
1267 break
1265 break
1268
1266
1269 # Never blame on extensions bundled with Mercurial.
1267 # Never blame on extensions bundled with Mercurial.
1270 if extensions.ismoduleinternal(mod):
1268 if extensions.ismoduleinternal(mod):
1271 continue
1269 continue
1272
1270
1273 tested = [util.versiontuple(t, 2) for t in testedwith.split()]
1271 tested = [util.versiontuple(t, 2) for t in testedwith.split()]
1274 if ct in tested:
1272 if ct in tested:
1275 continue
1273 continue
1276
1274
1277 lower = [t for t in tested if t < ct]
1275 lower = [t for t in tested if t < ct]
1278 nearest = max(lower or tested)
1276 nearest = max(lower or tested)
1279 if worst[0] is None or nearest < worst[1]:
1277 if worst[0] is None or nearest < worst[1]:
1280 worst = name, nearest, report
1278 worst = name, nearest, report
1281 if worst[0] is not None:
1279 if worst[0] is not None:
1282 name, testedwith, report = worst
1280 name, testedwith, report = worst
1283 if not isinstance(testedwith, (bytes, str)):
1281 if not isinstance(testedwith, (bytes, str)):
1284 testedwith = b'.'.join(
1282 testedwith = b'.'.join(
1285 [stringutil.forcebytestr(c) for c in testedwith]
1283 [stringutil.forcebytestr(c) for c in testedwith]
1286 )
1284 )
1287 warning = _(
1285 warning = _(
1288 b'** Unknown exception encountered with '
1286 b'** Unknown exception encountered with '
1289 b'possibly-broken third-party extension %s\n'
1287 b'possibly-broken third-party extension %s\n'
1290 b'** which supports versions %s of Mercurial.\n'
1288 b'** which supports versions %s of Mercurial.\n'
1291 b'** Please disable %s and try your action again.\n'
1289 b'** Please disable %s and try your action again.\n'
1292 b'** If that fixes the bug please report it to %s\n'
1290 b'** If that fixes the bug please report it to %s\n'
1293 ) % (name, testedwith, name, stringutil.forcebytestr(report))
1291 ) % (name, testedwith, name, stringutil.forcebytestr(report))
1294 else:
1292 else:
1295 bugtracker = ui.config(b'ui', b'supportcontact')
1293 bugtracker = ui.config(b'ui', b'supportcontact')
1296 if bugtracker is None:
1294 if bugtracker is None:
1297 bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
1295 bugtracker = _(b"https://mercurial-scm.org/wiki/BugTracker")
1298 warning = (
1296 warning = (
1299 _(
1297 _(
1300 b"** unknown exception encountered, "
1298 b"** unknown exception encountered, "
1301 b"please report by visiting\n** "
1299 b"please report by visiting\n** "
1302 )
1300 )
1303 + bugtracker
1301 + bugtracker
1304 + b'\n'
1302 + b'\n'
1305 )
1303 )
1306 sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')
1304 sysversion = pycompat.sysbytes(sys.version).replace(b'\n', b'')
1307 warning += (
1305 warning += (
1308 (_(b"** Python %s\n") % sysversion)
1306 (_(b"** Python %s\n") % sysversion)
1309 + (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
1307 + (_(b"** Mercurial Distributed SCM (version %s)\n") % util.version())
1310 + (
1308 + (
1311 _(b"** Extensions loaded: %s\n")
1309 _(b"** Extensions loaded: %s\n")
1312 % b", ".join([x[0] for x in extensions.extensions()])
1310 % b", ".join([x[0] for x in extensions.extensions()])
1313 )
1311 )
1314 )
1312 )
1315 return warning
1313 return warning
1316
1314
1317
1315
1318 def handlecommandexception(ui):
1316 def handlecommandexception(ui):
1319 """Produce a warning message for broken commands
1317 """Produce a warning message for broken commands
1320
1318
1321 Called when handling an exception; the exception is reraised if
1319 Called when handling an exception; the exception is reraised if
1322 this function returns False, ignored otherwise.
1320 this function returns False, ignored otherwise.
1323 """
1321 """
1324 warning = _exceptionwarning(ui)
1322 warning = _exceptionwarning(ui)
1325 ui.log(
1323 ui.log(
1326 b"commandexception",
1324 b"commandexception",
1327 b"%s\n%s\n",
1325 b"%s\n%s\n",
1328 warning,
1326 warning,
1329 pycompat.sysbytes(traceback.format_exc()),
1327 pycompat.sysbytes(traceback.format_exc()),
1330 )
1328 )
1331 ui.warn(warning)
1329 ui.warn(warning)
1332 return False # re-raise the exception
1330 return False # re-raise the exception
@@ -1,557 +1,565 b''
1 # error.py - Mercurial exceptions
1 # error.py - Mercurial exceptions
2 #
2 #
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Mercurial exceptions.
8 """Mercurial exceptions.
9
9
10 This allows us to catch exceptions at higher levels without forcing
10 This allows us to catch exceptions at higher levels without forcing
11 imports.
11 imports.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import difflib
16 import difflib
17
17
18 # Do not import anything but pycompat here, please
18 # Do not import anything but pycompat here, please
19 from . import pycompat
19 from . import pycompat
20
20
21
21
22 def _tobytes(exc):
22 def _tobytes(exc):
23 """Byte-stringify exception in the same way as BaseException_str()"""
23 """Byte-stringify exception in the same way as BaseException_str()"""
24 if not exc.args:
24 if not exc.args:
25 return b''
25 return b''
26 if len(exc.args) == 1:
26 if len(exc.args) == 1:
27 return pycompat.bytestr(exc.args[0])
27 return pycompat.bytestr(exc.args[0])
28 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
28 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
29
29
30
30
31 class Hint(object):
31 class Hint(object):
32 """Mix-in to provide a hint of an error
32 """Mix-in to provide a hint of an error
33
33
34 This should come first in the inheritance list to consume a hint and
34 This should come first in the inheritance list to consume a hint and
35 pass remaining arguments to the exception class.
35 pass remaining arguments to the exception class.
36 """
36 """
37
37
38 def __init__(self, *args, **kw):
38 def __init__(self, *args, **kw):
39 self.hint = kw.pop('hint', None)
39 self.hint = kw.pop('hint', None)
40 super(Hint, self).__init__(*args, **kw)
40 super(Hint, self).__init__(*args, **kw)
41
41
42
42
43 class StorageError(Hint, Exception):
43 class StorageError(Hint, Exception):
44 """Raised when an error occurs in a storage layer.
44 """Raised when an error occurs in a storage layer.
45
45
46 Usually subclassed by a storage-specific exception.
46 Usually subclassed by a storage-specific exception.
47 """
47 """
48
48
49 __bytes__ = _tobytes
49 __bytes__ = _tobytes
50
50
51
51
52 class RevlogError(StorageError):
52 class RevlogError(StorageError):
53 pass
53 pass
54
54
55
55
56 class SidedataHashError(RevlogError):
56 class SidedataHashError(RevlogError):
57 def __init__(self, key, expected, got):
57 def __init__(self, key, expected, got):
58 self.sidedatakey = key
58 self.sidedatakey = key
59 self.expecteddigest = expected
59 self.expecteddigest = expected
60 self.actualdigest = got
60 self.actualdigest = got
61
61
62
62
63 class FilteredIndexError(IndexError):
63 class FilteredIndexError(IndexError):
64 __bytes__ = _tobytes
64 __bytes__ = _tobytes
65
65
66
66
67 class LookupError(RevlogError, KeyError):
67 class LookupError(RevlogError, KeyError):
68 def __init__(self, name, index, message):
68 def __init__(self, name, index, message):
69 self.name = name
69 self.name = name
70 self.index = index
70 self.index = index
71 # this can't be called 'message' because at least some installs of
71 # this can't be called 'message' because at least some installs of
72 # Python 2.6+ complain about the 'message' property being deprecated
72 # Python 2.6+ complain about the 'message' property being deprecated
73 self.lookupmessage = message
73 self.lookupmessage = message
74 if isinstance(name, bytes) and len(name) == 20:
74 if isinstance(name, bytes) and len(name) == 20:
75 from .node import short
75 from .node import short
76
76
77 name = short(name)
77 name = short(name)
78 # if name is a binary node, it can be None
78 # if name is a binary node, it can be None
79 RevlogError.__init__(
79 RevlogError.__init__(
80 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
80 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
81 )
81 )
82
82
83 def __bytes__(self):
83 def __bytes__(self):
84 return RevlogError.__bytes__(self)
84 return RevlogError.__bytes__(self)
85
85
86 def __str__(self):
86 def __str__(self):
87 return RevlogError.__str__(self)
87 return RevlogError.__str__(self)
88
88
89
89
90 class AmbiguousPrefixLookupError(LookupError):
90 class AmbiguousPrefixLookupError(LookupError):
91 pass
91 pass
92
92
93
93
94 class FilteredLookupError(LookupError):
94 class FilteredLookupError(LookupError):
95 pass
95 pass
96
96
97
97
98 class ManifestLookupError(LookupError):
98 class ManifestLookupError(LookupError):
99 pass
99 pass
100
100
101
101
102 class CommandError(Exception):
102 class CommandError(Exception):
103 """Exception raised on errors in parsing the command line."""
103 """Exception raised on errors in parsing the command line."""
104
104
105 def __init__(self, command, message):
105 def __init__(self, command, message):
106 self.command = command
106 self.command = command
107 self.message = message
107 self.message = message
108 super(CommandError, self).__init__()
108 super(CommandError, self).__init__()
109
109
110 __bytes__ = _tobytes
110 __bytes__ = _tobytes
111
111
112
112
113 class UnknownCommand(Exception):
113 class UnknownCommand(Exception):
114 """Exception raised if command is not in the command table."""
114 """Exception raised if command is not in the command table."""
115
115
116 def __init__(self, command, all_commands=None):
116 def __init__(self, command, all_commands=None):
117 self.command = command
117 self.command = command
118 self.all_commands = all_commands
118 self.all_commands = all_commands
119 super(UnknownCommand, self).__init__()
119 super(UnknownCommand, self).__init__()
120
120
121 __bytes__ = _tobytes
121 __bytes__ = _tobytes
122
122
123
123
124 class AmbiguousCommand(Exception):
124 class AmbiguousCommand(Exception):
125 """Exception raised if command shortcut matches more than one command."""
125 """Exception raised if command shortcut matches more than one command."""
126
126
127 def __init__(self, prefix, matches):
127 def __init__(self, prefix, matches):
128 self.prefix = prefix
128 self.prefix = prefix
129 self.matches = matches
129 self.matches = matches
130 super(AmbiguousCommand, self).__init__()
130 super(AmbiguousCommand, self).__init__()
131
131
132 __bytes__ = _tobytes
132 __bytes__ = _tobytes
133
133
134
134
135 class WorkerError(Exception):
135 class WorkerError(Exception):
136 """Exception raised when a worker process dies."""
136 """Exception raised when a worker process dies."""
137
137
138 def __init__(self, status_code):
138 def __init__(self, status_code):
139 self.status_code = status_code
139 self.status_code = status_code
140
140
141
141
142 class InterventionRequired(Hint, Exception):
142 class InterventionRequired(Hint, Exception):
143 """Exception raised when a command requires human intervention."""
143 """Exception raised when a command requires human intervention."""
144
144
145 __bytes__ = _tobytes
145 __bytes__ = _tobytes
146
146
147
147
148 class ConflictResolutionRequired(InterventionRequired):
148 class ConflictResolutionRequired(InterventionRequired):
149 """Exception raised when a continuable command required merge conflict resolution."""
149 """Exception raised when a continuable command required merge conflict resolution."""
150
150
151 def __init__(self, opname):
151 def __init__(self, opname):
152 from .i18n import _
152 from .i18n import _
153
153
154 self.opname = opname
154 self.opname = opname
155 InterventionRequired.__init__(
155 InterventionRequired.__init__(
156 self,
156 self,
157 _(
157 _(
158 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
158 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
159 )
159 )
160 % opname,
160 % opname,
161 )
161 )
162
162
163
163
164 class Abort(Hint, Exception):
164 class Abort(Hint, Exception):
165 """Raised if a command needs to print an error and exit."""
165 """Raised if a command needs to print an error and exit."""
166
166
167 def __init__(self, message, hint=None):
167 def __init__(self, message, hint=None):
168 self.message = message
168 self.message = message
169 self.hint = hint
169 self.hint = hint
170 # Pass the message into the Exception constructor to help extensions
170 # Pass the message into the Exception constructor to help extensions
171 # that look for exc.args[0].
171 # that look for exc.args[0].
172 Exception.__init__(self, message)
172 Exception.__init__(self, message)
173
173
174 def __bytes__(self):
174 def __bytes__(self):
175 return self.message
175 return self.message
176
176
177 if pycompat.ispy3:
177 if pycompat.ispy3:
178
178
179 def __str__(self):
179 def __str__(self):
180 # the output would be unreadable if the message was translated,
180 # the output would be unreadable if the message was translated,
181 # but do not replace it with encoding.strfromlocal(), which
181 # but do not replace it with encoding.strfromlocal(), which
182 # may raise another exception.
182 # may raise another exception.
183 return pycompat.sysstr(self.__bytes__())
183 return pycompat.sysstr(self.__bytes__())
184
184
185 def format(self):
186 from .i18n import _
187
188 message = _(b"abort: %s\n") % self.message
189 if self.hint:
190 message += _(b"(%s)\n") % self.hint
191 return message
192
185
193
186 class InputError(Abort):
194 class InputError(Abort):
187 """Indicates that the user made an error in their input.
195 """Indicates that the user made an error in their input.
188
196
189 Examples: Invalid command, invalid flags, invalid revision.
197 Examples: Invalid command, invalid flags, invalid revision.
190 """
198 """
191
199
192
200
193 class StateError(Abort):
201 class StateError(Abort):
194 """Indicates that the operation might work if retried in a different state.
202 """Indicates that the operation might work if retried in a different state.
195
203
196 Examples: Unresolved merge conflicts, unfinished operations.
204 Examples: Unresolved merge conflicts, unfinished operations.
197 """
205 """
198
206
199
207
200 class CanceledError(Abort):
208 class CanceledError(Abort):
201 """Indicates that the user canceled the operation.
209 """Indicates that the user canceled the operation.
202
210
203 Examples: Close commit editor with error status, quit chistedit.
211 Examples: Close commit editor with error status, quit chistedit.
204 """
212 """
205
213
206
214
207 class HookLoadError(Abort):
215 class HookLoadError(Abort):
208 """raised when loading a hook fails, aborting an operation
216 """raised when loading a hook fails, aborting an operation
209
217
210 Exists to allow more specialized catching."""
218 Exists to allow more specialized catching."""
211
219
212
220
213 class HookAbort(Abort):
221 class HookAbort(Abort):
214 """raised when a validation hook fails, aborting an operation
222 """raised when a validation hook fails, aborting an operation
215
223
216 Exists to allow more specialized catching."""
224 Exists to allow more specialized catching."""
217
225
218
226
219 class ConfigError(Abort):
227 class ConfigError(Abort):
220 """Exception raised when parsing config files"""
228 """Exception raised when parsing config files"""
221
229
222
230
223 class UpdateAbort(Abort):
231 class UpdateAbort(Abort):
224 """Raised when an update is aborted for destination issue"""
232 """Raised when an update is aborted for destination issue"""
225
233
226
234
227 class MergeDestAbort(Abort):
235 class MergeDestAbort(Abort):
228 """Raised when an update is aborted for destination issues"""
236 """Raised when an update is aborted for destination issues"""
229
237
230
238
231 class NoMergeDestAbort(MergeDestAbort):
239 class NoMergeDestAbort(MergeDestAbort):
232 """Raised when an update is aborted because there is nothing to merge"""
240 """Raised when an update is aborted because there is nothing to merge"""
233
241
234
242
235 class ManyMergeDestAbort(MergeDestAbort):
243 class ManyMergeDestAbort(MergeDestAbort):
236 """Raised when an update is aborted because destination is ambiguous"""
244 """Raised when an update is aborted because destination is ambiguous"""
237
245
238
246
239 class ResponseExpected(Abort):
247 class ResponseExpected(Abort):
240 """Raised when an EOF is received for a prompt"""
248 """Raised when an EOF is received for a prompt"""
241
249
242 def __init__(self):
250 def __init__(self):
243 from .i18n import _
251 from .i18n import _
244
252
245 Abort.__init__(self, _(b'response expected'))
253 Abort.__init__(self, _(b'response expected'))
246
254
247
255
248 class OutOfBandError(Hint, Exception):
256 class OutOfBandError(Hint, Exception):
249 """Exception raised when a remote repo reports failure"""
257 """Exception raised when a remote repo reports failure"""
250
258
251 __bytes__ = _tobytes
259 __bytes__ = _tobytes
252
260
253
261
254 class ParseError(Hint, Exception):
262 class ParseError(Hint, Exception):
255 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
263 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
256
264
257 def __init__(self, message, location=None, hint=None):
265 def __init__(self, message, location=None, hint=None):
258 self.message = message
266 self.message = message
259 self.location = location
267 self.location = location
260 self.hint = hint
268 self.hint = hint
261 # Pass the message and possibly location into the Exception constructor
269 # Pass the message and possibly location into the Exception constructor
262 # to help code that looks for exc.args.
270 # to help code that looks for exc.args.
263 if location is not None:
271 if location is not None:
264 Exception.__init__(self, message, location)
272 Exception.__init__(self, message, location)
265 else:
273 else:
266 Exception.__init__(self, message)
274 Exception.__init__(self, message)
267
275
268 __bytes__ = _tobytes
276 __bytes__ = _tobytes
269
277
270 def format(self):
278 def format(self):
271 from .i18n import _
279 from .i18n import _
272
280
273 if self.location is not None:
281 if self.location is not None:
274 message = _(b"hg: parse error at %s: %s\n") % (
282 message = _(b"hg: parse error at %s: %s\n") % (
275 pycompat.bytestr(self.location),
283 pycompat.bytestr(self.location),
276 self.message,
284 self.message,
277 )
285 )
278 else:
286 else:
279 message = _(b"hg: parse error: %s\n") % self.message
287 message = _(b"hg: parse error: %s\n") % self.message
280 if self.hint:
288 if self.hint:
281 message += _(b"(%s)\n") % self.hint
289 message += _(b"(%s)\n") % self.hint
282 return message
290 return message
283
291
284
292
285 class PatchError(Exception):
293 class PatchError(Exception):
286 __bytes__ = _tobytes
294 __bytes__ = _tobytes
287
295
288
296
289 def getsimilar(symbols, value):
297 def getsimilar(symbols, value):
290 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
298 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
291 # The cutoff for similarity here is pretty arbitrary. It should
299 # The cutoff for similarity here is pretty arbitrary. It should
292 # probably be investigated and tweaked.
300 # probably be investigated and tweaked.
293 return [s for s in symbols if sim(s) > 0.6]
301 return [s for s in symbols if sim(s) > 0.6]
294
302
295
303
296 def similarity_hint(similar):
304 def similarity_hint(similar):
297 from .i18n import _
305 from .i18n import _
298
306
299 if len(similar) == 1:
307 if len(similar) == 1:
300 return _(b"did you mean %s?") % similar[0]
308 return _(b"did you mean %s?") % similar[0]
301 elif similar:
309 elif similar:
302 ss = b", ".join(sorted(similar))
310 ss = b", ".join(sorted(similar))
303 return _(b"did you mean one of %s?") % ss
311 return _(b"did you mean one of %s?") % ss
304 else:
312 else:
305 return None
313 return None
306
314
307
315
308 class UnknownIdentifier(ParseError):
316 class UnknownIdentifier(ParseError):
309 """Exception raised when a {rev,file}set references an unknown identifier"""
317 """Exception raised when a {rev,file}set references an unknown identifier"""
310
318
311 def __init__(self, function, symbols):
319 def __init__(self, function, symbols):
312 from .i18n import _
320 from .i18n import _
313
321
314 similar = getsimilar(symbols, function)
322 similar = getsimilar(symbols, function)
315 hint = similarity_hint(similar)
323 hint = similarity_hint(similar)
316
324
317 ParseError.__init__(
325 ParseError.__init__(
318 self, _(b"unknown identifier: %s") % function, hint=hint
326 self, _(b"unknown identifier: %s") % function, hint=hint
319 )
327 )
320
328
321
329
322 class RepoError(Hint, Exception):
330 class RepoError(Hint, Exception):
323 __bytes__ = _tobytes
331 __bytes__ = _tobytes
324
332
325
333
326 class RepoLookupError(RepoError):
334 class RepoLookupError(RepoError):
327 pass
335 pass
328
336
329
337
330 class FilteredRepoLookupError(RepoLookupError):
338 class FilteredRepoLookupError(RepoLookupError):
331 pass
339 pass
332
340
333
341
334 class CapabilityError(RepoError):
342 class CapabilityError(RepoError):
335 pass
343 pass
336
344
337
345
338 class RequirementError(RepoError):
346 class RequirementError(RepoError):
339 """Exception raised if .hg/requires has an unknown entry."""
347 """Exception raised if .hg/requires has an unknown entry."""
340
348
341
349
342 class StdioError(IOError):
350 class StdioError(IOError):
343 """Raised if I/O to stdout or stderr fails"""
351 """Raised if I/O to stdout or stderr fails"""
344
352
345 def __init__(self, err):
353 def __init__(self, err):
346 IOError.__init__(self, err.errno, err.strerror)
354 IOError.__init__(self, err.errno, err.strerror)
347
355
348 # no __bytes__() because error message is derived from the standard IOError
356 # no __bytes__() because error message is derived from the standard IOError
349
357
350
358
351 class UnsupportedMergeRecords(Abort):
359 class UnsupportedMergeRecords(Abort):
352 def __init__(self, recordtypes):
360 def __init__(self, recordtypes):
353 from .i18n import _
361 from .i18n import _
354
362
355 self.recordtypes = sorted(recordtypes)
363 self.recordtypes = sorted(recordtypes)
356 s = b' '.join(self.recordtypes)
364 s = b' '.join(self.recordtypes)
357 Abort.__init__(
365 Abort.__init__(
358 self,
366 self,
359 _(b'unsupported merge state records: %s') % s,
367 _(b'unsupported merge state records: %s') % s,
360 hint=_(
368 hint=_(
361 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
369 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
362 b'more information'
370 b'more information'
363 ),
371 ),
364 )
372 )
365
373
366
374
367 class UnknownVersion(Abort):
375 class UnknownVersion(Abort):
368 """generic exception for aborting from an encounter with an unknown version
376 """generic exception for aborting from an encounter with an unknown version
369 """
377 """
370
378
371 def __init__(self, msg, hint=None, version=None):
379 def __init__(self, msg, hint=None, version=None):
372 self.version = version
380 self.version = version
373 super(UnknownVersion, self).__init__(msg, hint=hint)
381 super(UnknownVersion, self).__init__(msg, hint=hint)
374
382
375
383
376 class LockError(IOError):
384 class LockError(IOError):
377 def __init__(self, errno, strerror, filename, desc):
385 def __init__(self, errno, strerror, filename, desc):
378 IOError.__init__(self, errno, strerror, filename)
386 IOError.__init__(self, errno, strerror, filename)
379 self.desc = desc
387 self.desc = desc
380
388
381 # no __bytes__() because error message is derived from the standard IOError
389 # no __bytes__() because error message is derived from the standard IOError
382
390
383
391
384 class LockHeld(LockError):
392 class LockHeld(LockError):
385 def __init__(self, errno, filename, desc, locker):
393 def __init__(self, errno, filename, desc, locker):
386 LockError.__init__(self, errno, b'Lock held', filename, desc)
394 LockError.__init__(self, errno, b'Lock held', filename, desc)
387 self.locker = locker
395 self.locker = locker
388
396
389
397
390 class LockUnavailable(LockError):
398 class LockUnavailable(LockError):
391 pass
399 pass
392
400
393
401
394 # LockError is for errors while acquiring the lock -- this is unrelated
402 # LockError is for errors while acquiring the lock -- this is unrelated
395 class LockInheritanceContractViolation(RuntimeError):
403 class LockInheritanceContractViolation(RuntimeError):
396 __bytes__ = _tobytes
404 __bytes__ = _tobytes
397
405
398
406
399 class ResponseError(Exception):
407 class ResponseError(Exception):
400 """Raised to print an error with part of output and exit."""
408 """Raised to print an error with part of output and exit."""
401
409
402 __bytes__ = _tobytes
410 __bytes__ = _tobytes
403
411
404
412
405 # derived from KeyboardInterrupt to simplify some breakout code
413 # derived from KeyboardInterrupt to simplify some breakout code
406 class SignalInterrupt(KeyboardInterrupt):
414 class SignalInterrupt(KeyboardInterrupt):
407 """Exception raised on SIGTERM and SIGHUP."""
415 """Exception raised on SIGTERM and SIGHUP."""
408
416
409
417
410 class SignatureError(Exception):
418 class SignatureError(Exception):
411 __bytes__ = _tobytes
419 __bytes__ = _tobytes
412
420
413
421
414 class PushRaced(RuntimeError):
422 class PushRaced(RuntimeError):
415 """An exception raised during unbundling that indicate a push race"""
423 """An exception raised during unbundling that indicate a push race"""
416
424
417 __bytes__ = _tobytes
425 __bytes__ = _tobytes
418
426
419
427
420 class ProgrammingError(Hint, RuntimeError):
428 class ProgrammingError(Hint, RuntimeError):
421 """Raised if a mercurial (core or extension) developer made a mistake"""
429 """Raised if a mercurial (core or extension) developer made a mistake"""
422
430
423 def __init__(self, msg, *args, **kwargs):
431 def __init__(self, msg, *args, **kwargs):
424 # On Python 3, turn the message back into a string since this is
432 # On Python 3, turn the message back into a string since this is
425 # an internal-only error that won't be printed except in a
433 # an internal-only error that won't be printed except in a
426 # stack traces.
434 # stack traces.
427 msg = pycompat.sysstr(msg)
435 msg = pycompat.sysstr(msg)
428 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
436 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
429
437
430 __bytes__ = _tobytes
438 __bytes__ = _tobytes
431
439
432
440
433 class WdirUnsupported(Exception):
441 class WdirUnsupported(Exception):
434 """An exception which is raised when 'wdir()' is not supported"""
442 """An exception which is raised when 'wdir()' is not supported"""
435
443
436 __bytes__ = _tobytes
444 __bytes__ = _tobytes
437
445
438
446
439 # bundle2 related errors
447 # bundle2 related errors
440 class BundleValueError(ValueError):
448 class BundleValueError(ValueError):
441 """error raised when bundle2 cannot be processed"""
449 """error raised when bundle2 cannot be processed"""
442
450
443 __bytes__ = _tobytes
451 __bytes__ = _tobytes
444
452
445
453
446 class BundleUnknownFeatureError(BundleValueError):
454 class BundleUnknownFeatureError(BundleValueError):
447 def __init__(self, parttype=None, params=(), values=()):
455 def __init__(self, parttype=None, params=(), values=()):
448 self.parttype = parttype
456 self.parttype = parttype
449 self.params = params
457 self.params = params
450 self.values = values
458 self.values = values
451 if self.parttype is None:
459 if self.parttype is None:
452 msg = b'Stream Parameter'
460 msg = b'Stream Parameter'
453 else:
461 else:
454 msg = parttype
462 msg = parttype
455 entries = self.params
463 entries = self.params
456 if self.params and self.values:
464 if self.params and self.values:
457 assert len(self.params) == len(self.values)
465 assert len(self.params) == len(self.values)
458 entries = []
466 entries = []
459 for idx, par in enumerate(self.params):
467 for idx, par in enumerate(self.params):
460 val = self.values[idx]
468 val = self.values[idx]
461 if val is None:
469 if val is None:
462 entries.append(val)
470 entries.append(val)
463 else:
471 else:
464 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
472 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
465 if entries:
473 if entries:
466 msg = b'%s - %s' % (msg, b', '.join(entries))
474 msg = b'%s - %s' % (msg, b', '.join(entries))
467 ValueError.__init__(self, msg)
475 ValueError.__init__(self, msg)
468
476
469
477
470 class ReadOnlyPartError(RuntimeError):
478 class ReadOnlyPartError(RuntimeError):
471 """error raised when code tries to alter a part being generated"""
479 """error raised when code tries to alter a part being generated"""
472
480
473 __bytes__ = _tobytes
481 __bytes__ = _tobytes
474
482
475
483
476 class PushkeyFailed(Abort):
484 class PushkeyFailed(Abort):
477 """error raised when a pushkey part failed to update a value"""
485 """error raised when a pushkey part failed to update a value"""
478
486
479 def __init__(
487 def __init__(
480 self, partid, namespace=None, key=None, new=None, old=None, ret=None
488 self, partid, namespace=None, key=None, new=None, old=None, ret=None
481 ):
489 ):
482 self.partid = partid
490 self.partid = partid
483 self.namespace = namespace
491 self.namespace = namespace
484 self.key = key
492 self.key = key
485 self.new = new
493 self.new = new
486 self.old = old
494 self.old = old
487 self.ret = ret
495 self.ret = ret
488 # no i18n expected to be processed into a better message
496 # no i18n expected to be processed into a better message
489 Abort.__init__(
497 Abort.__init__(
490 self, b'failed to update value for "%s/%s"' % (namespace, key)
498 self, b'failed to update value for "%s/%s"' % (namespace, key)
491 )
499 )
492
500
493
501
494 class CensoredNodeError(StorageError):
502 class CensoredNodeError(StorageError):
495 """error raised when content verification fails on a censored node
503 """error raised when content verification fails on a censored node
496
504
497 Also contains the tombstone data substituted for the uncensored data.
505 Also contains the tombstone data substituted for the uncensored data.
498 """
506 """
499
507
500 def __init__(self, filename, node, tombstone):
508 def __init__(self, filename, node, tombstone):
501 from .node import short
509 from .node import short
502
510
503 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
511 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
504 self.tombstone = tombstone
512 self.tombstone = tombstone
505
513
506
514
507 class CensoredBaseError(StorageError):
515 class CensoredBaseError(StorageError):
508 """error raised when a delta is rejected because its base is censored
516 """error raised when a delta is rejected because its base is censored
509
517
510 A delta based on a censored revision must be formed as single patch
518 A delta based on a censored revision must be formed as single patch
511 operation which replaces the entire base with new content. This ensures
519 operation which replaces the entire base with new content. This ensures
512 the delta may be applied by clones which have not censored the base.
520 the delta may be applied by clones which have not censored the base.
513 """
521 """
514
522
515
523
516 class InvalidBundleSpecification(Exception):
524 class InvalidBundleSpecification(Exception):
517 """error raised when a bundle specification is invalid.
525 """error raised when a bundle specification is invalid.
518
526
519 This is used for syntax errors as opposed to support errors.
527 This is used for syntax errors as opposed to support errors.
520 """
528 """
521
529
522 __bytes__ = _tobytes
530 __bytes__ = _tobytes
523
531
524
532
525 class UnsupportedBundleSpecification(Exception):
533 class UnsupportedBundleSpecification(Exception):
526 """error raised when a bundle specification is not supported."""
534 """error raised when a bundle specification is not supported."""
527
535
528 __bytes__ = _tobytes
536 __bytes__ = _tobytes
529
537
530
538
531 class CorruptedState(Exception):
539 class CorruptedState(Exception):
532 """error raised when a command is not able to read its state from file"""
540 """error raised when a command is not able to read its state from file"""
533
541
534 __bytes__ = _tobytes
542 __bytes__ = _tobytes
535
543
536
544
537 class PeerTransportError(Abort):
545 class PeerTransportError(Abort):
538 """Transport-level I/O error when communicating with a peer repo."""
546 """Transport-level I/O error when communicating with a peer repo."""
539
547
540
548
541 class InMemoryMergeConflictsError(Exception):
549 class InMemoryMergeConflictsError(Exception):
542 """Exception raised when merge conflicts arose during an in-memory merge."""
550 """Exception raised when merge conflicts arose during an in-memory merge."""
543
551
544 __bytes__ = _tobytes
552 __bytes__ = _tobytes
545
553
546
554
547 class WireprotoCommandError(Exception):
555 class WireprotoCommandError(Exception):
548 """Represents an error during execution of a wire protocol command.
556 """Represents an error during execution of a wire protocol command.
549
557
550 Should only be thrown by wire protocol version 2 commands.
558 Should only be thrown by wire protocol version 2 commands.
551
559
552 The error is a formatter string and an optional iterable of arguments.
560 The error is a formatter string and an optional iterable of arguments.
553 """
561 """
554
562
555 def __init__(self, message, args=None):
563 def __init__(self, message, args=None):
556 self.message = message
564 self.message = message
557 self.messageargs = args
565 self.messageargs = args
@@ -1,2312 +1,2310 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import os
12 import os
13 import posixpath
13 import posixpath
14 import re
14 import re
15 import subprocess
15 import subprocess
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from .pycompat import getattr
28 from .pycompat import getattr
29 from .thirdparty import attr
29 from .thirdparty import attr
30 from . import (
30 from . import (
31 copies as copiesmod,
31 copies as copiesmod,
32 encoding,
32 encoding,
33 error,
33 error,
34 match as matchmod,
34 match as matchmod,
35 obsolete,
35 obsolete,
36 obsutil,
36 obsutil,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 policy,
39 policy,
40 pycompat,
40 pycompat,
41 requirements as requirementsmod,
41 requirements as requirementsmod,
42 revsetlang,
42 revsetlang,
43 similar,
43 similar,
44 smartset,
44 smartset,
45 url,
45 url,
46 util,
46 util,
47 vfs,
47 vfs,
48 )
48 )
49
49
50 from .utils import (
50 from .utils import (
51 hashutil,
51 hashutil,
52 procutil,
52 procutil,
53 stringutil,
53 stringutil,
54 )
54 )
55
55
56 if pycompat.iswindows:
56 if pycompat.iswindows:
57 from . import scmwindows as scmplatform
57 from . import scmwindows as scmplatform
58 else:
58 else:
59 from . import scmposix as scmplatform
59 from . import scmposix as scmplatform
60
60
61 parsers = policy.importmod('parsers')
61 parsers = policy.importmod('parsers')
62 rustrevlog = policy.importrust('revlog')
62 rustrevlog = policy.importrust('revlog')
63
63
64 termsize = scmplatform.termsize
64 termsize = scmplatform.termsize
65
65
66
66
67 @attr.s(slots=True, repr=False)
67 @attr.s(slots=True, repr=False)
68 class status(object):
68 class status(object):
69 '''Struct with a list of files per status.
69 '''Struct with a list of files per status.
70
70
71 The 'deleted', 'unknown' and 'ignored' properties are only
71 The 'deleted', 'unknown' and 'ignored' properties are only
72 relevant to the working copy.
72 relevant to the working copy.
73 '''
73 '''
74
74
75 modified = attr.ib(default=attr.Factory(list))
75 modified = attr.ib(default=attr.Factory(list))
76 added = attr.ib(default=attr.Factory(list))
76 added = attr.ib(default=attr.Factory(list))
77 removed = attr.ib(default=attr.Factory(list))
77 removed = attr.ib(default=attr.Factory(list))
78 deleted = attr.ib(default=attr.Factory(list))
78 deleted = attr.ib(default=attr.Factory(list))
79 unknown = attr.ib(default=attr.Factory(list))
79 unknown = attr.ib(default=attr.Factory(list))
80 ignored = attr.ib(default=attr.Factory(list))
80 ignored = attr.ib(default=attr.Factory(list))
81 clean = attr.ib(default=attr.Factory(list))
81 clean = attr.ib(default=attr.Factory(list))
82
82
83 def __iter__(self):
83 def __iter__(self):
84 yield self.modified
84 yield self.modified
85 yield self.added
85 yield self.added
86 yield self.removed
86 yield self.removed
87 yield self.deleted
87 yield self.deleted
88 yield self.unknown
88 yield self.unknown
89 yield self.ignored
89 yield self.ignored
90 yield self.clean
90 yield self.clean
91
91
92 def __repr__(self):
92 def __repr__(self):
93 return (
93 return (
94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 r'unknown=%s, ignored=%s, clean=%s>'
95 r'unknown=%s, ignored=%s, clean=%s>'
96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97
97
98
98
99 def itersubrepos(ctx1, ctx2):
99 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
100 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
106
107 missing = set()
107 missing = set()
108
108
109 for subpath in ctx2.substate:
109 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
110 if subpath not in ctx1.substate:
111 del subpaths[subpath]
111 del subpaths[subpath]
112 missing.add(subpath)
112 missing.add(subpath)
113
113
114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 yield subpath, ctx.sub(subpath)
115 yield subpath, ctx.sub(subpath)
116
116
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
118 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
120 # against itself.
121 for subpath in missing:
121 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
122 yield subpath, ctx2.nullsub(subpath, ctx1)
123
123
124
124
125 def nochangesfound(ui, repo, excluded=None):
125 def nochangesfound(ui, repo, excluded=None):
126 '''Report no changes for push/pull, excluded is None or a list of
126 '''Report no changes for push/pull, excluded is None or a list of
127 nodes excluded from the push/pull.
127 nodes excluded from the push/pull.
128 '''
128 '''
129 secretlist = []
129 secretlist = []
130 if excluded:
130 if excluded:
131 for n in excluded:
131 for n in excluded:
132 ctx = repo[n]
132 ctx = repo[n]
133 if ctx.phase() >= phases.secret and not ctx.extinct():
133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 secretlist.append(n)
134 secretlist.append(n)
135
135
136 if secretlist:
136 if secretlist:
137 ui.status(
137 ui.status(
138 _(b"no changes found (ignored %d secret changesets)\n")
138 _(b"no changes found (ignored %d secret changesets)\n")
139 % len(secretlist)
139 % len(secretlist)
140 )
140 )
141 else:
141 else:
142 ui.status(_(b"no changes found\n"))
142 ui.status(_(b"no changes found\n"))
143
143
144
144
145 def callcatch(ui, func):
145 def callcatch(ui, func):
146 """call func() with global exception handling
146 """call func() with global exception handling
147
147
148 return func() if no exception happens. otherwise do some error handling
148 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
149 and return an exit code accordingly. does not handle all exceptions.
150 """
150 """
151 coarse_exit_code = -1
151 coarse_exit_code = -1
152 detailed_exit_code = -1
152 detailed_exit_code = -1
153 try:
153 try:
154 try:
154 try:
155 return func()
155 return func()
156 except: # re-raises
156 except: # re-raises
157 ui.traceback()
157 ui.traceback()
158 raise
158 raise
159 # Global exception handling, alphabetically
159 # Global exception handling, alphabetically
160 # Mercurial-specific first, followed by built-in and library exceptions
160 # Mercurial-specific first, followed by built-in and library exceptions
161 except error.LockHeld as inst:
161 except error.LockHeld as inst:
162 detailed_exit_code = 20
162 detailed_exit_code = 20
163 if inst.errno == errno.ETIMEDOUT:
163 if inst.errno == errno.ETIMEDOUT:
164 reason = _(b'timed out waiting for lock held by %r') % (
164 reason = _(b'timed out waiting for lock held by %r') % (
165 pycompat.bytestr(inst.locker)
165 pycompat.bytestr(inst.locker)
166 )
166 )
167 else:
167 else:
168 reason = _(b'lock held by %r') % inst.locker
168 reason = _(b'lock held by %r') % inst.locker
169 ui.error(
169 ui.error(
170 _(b"abort: %s: %s\n")
170 _(b"abort: %s: %s\n")
171 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
171 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
172 )
172 )
173 if not inst.locker:
173 if not inst.locker:
174 ui.error(_(b"(lock might be very busy)\n"))
174 ui.error(_(b"(lock might be very busy)\n"))
175 except error.LockUnavailable as inst:
175 except error.LockUnavailable as inst:
176 detailed_exit_code = 20
176 detailed_exit_code = 20
177 ui.error(
177 ui.error(
178 _(b"abort: could not lock %s: %s\n")
178 _(b"abort: could not lock %s: %s\n")
179 % (
179 % (
180 inst.desc or stringutil.forcebytestr(inst.filename),
180 inst.desc or stringutil.forcebytestr(inst.filename),
181 encoding.strtolocal(inst.strerror),
181 encoding.strtolocal(inst.strerror),
182 )
182 )
183 )
183 )
184 except error.OutOfBandError as inst:
184 except error.OutOfBandError as inst:
185 detailed_exit_code = 100
185 detailed_exit_code = 100
186 if inst.args:
186 if inst.args:
187 msg = _(b"abort: remote error:\n")
187 msg = _(b"abort: remote error:\n")
188 else:
188 else:
189 msg = _(b"abort: remote error\n")
189 msg = _(b"abort: remote error\n")
190 ui.error(msg)
190 ui.error(msg)
191 if inst.args:
191 if inst.args:
192 ui.error(b''.join(inst.args))
192 ui.error(b''.join(inst.args))
193 if inst.hint:
193 if inst.hint:
194 ui.error(b'(%s)\n' % inst.hint)
194 ui.error(b'(%s)\n' % inst.hint)
195 except error.RepoError as inst:
195 except error.RepoError as inst:
196 ui.error(_(b"abort: %s!\n") % inst)
196 ui.error(_(b"abort: %s!\n") % inst)
197 if inst.hint:
197 if inst.hint:
198 ui.error(_(b"(%s)\n") % inst.hint)
198 ui.error(_(b"(%s)\n") % inst.hint)
199 except error.ResponseError as inst:
199 except error.ResponseError as inst:
200 ui.error(_(b"abort: %s") % inst.args[0])
200 ui.error(_(b"abort: %s") % inst.args[0])
201 msg = inst.args[1]
201 msg = inst.args[1]
202 if isinstance(msg, type(u'')):
202 if isinstance(msg, type(u'')):
203 msg = pycompat.sysbytes(msg)
203 msg = pycompat.sysbytes(msg)
204 if not isinstance(msg, bytes):
204 if not isinstance(msg, bytes):
205 ui.error(b" %r\n" % (msg,))
205 ui.error(b" %r\n" % (msg,))
206 elif not msg:
206 elif not msg:
207 ui.error(_(b" empty string\n"))
207 ui.error(_(b" empty string\n"))
208 else:
208 else:
209 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
209 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
210 except error.CensoredNodeError as inst:
210 except error.CensoredNodeError as inst:
211 ui.error(_(b"abort: file censored %s!\n") % inst)
211 ui.error(_(b"abort: file censored %s!\n") % inst)
212 except error.StorageError as inst:
212 except error.StorageError as inst:
213 ui.error(_(b"abort: %s!\n") % inst)
213 ui.error(_(b"abort: %s!\n") % inst)
214 if inst.hint:
214 if inst.hint:
215 ui.error(_(b"(%s)\n") % inst.hint)
215 ui.error(_(b"(%s)\n") % inst.hint)
216 except error.InterventionRequired as inst:
216 except error.InterventionRequired as inst:
217 ui.error(b"%s\n" % inst)
217 ui.error(b"%s\n" % inst)
218 if inst.hint:
218 if inst.hint:
219 ui.error(_(b"(%s)\n") % inst.hint)
219 ui.error(_(b"(%s)\n") % inst.hint)
220 detailed_exit_code = 240
220 detailed_exit_code = 240
221 coarse_exit_code = 1
221 coarse_exit_code = 1
222 except error.WdirUnsupported:
222 except error.WdirUnsupported:
223 ui.error(_(b"abort: working directory revision cannot be specified\n"))
223 ui.error(_(b"abort: working directory revision cannot be specified\n"))
224 except error.Abort as inst:
224 except error.Abort as inst:
225 if isinstance(inst, error.InputError):
225 if isinstance(inst, error.InputError):
226 detailed_exit_code = 10
226 detailed_exit_code = 10
227 elif isinstance(inst, error.StateError):
227 elif isinstance(inst, error.StateError):
228 detailed_exit_code = 20
228 detailed_exit_code = 20
229 elif isinstance(inst, error.ConfigError):
229 elif isinstance(inst, error.ConfigError):
230 detailed_exit_code = 30
230 detailed_exit_code = 30
231 elif isinstance(inst, error.CanceledError):
231 elif isinstance(inst, error.CanceledError):
232 detailed_exit_code = 250
232 detailed_exit_code = 250
233 ui.error(_(b"abort: %s\n") % inst.message)
233 ui.error(inst.format())
234 if inst.hint:
235 ui.error(_(b"(%s)\n") % inst.hint)
236 except error.WorkerError as inst:
234 except error.WorkerError as inst:
237 # Don't print a message -- the worker already should have
235 # Don't print a message -- the worker already should have
238 return inst.status_code
236 return inst.status_code
239 except ImportError as inst:
237 except ImportError as inst:
240 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
238 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
241 m = stringutil.forcebytestr(inst).split()[-1]
239 m = stringutil.forcebytestr(inst).split()[-1]
242 if m in b"mpatch bdiff".split():
240 if m in b"mpatch bdiff".split():
243 ui.error(_(b"(did you forget to compile extensions?)\n"))
241 ui.error(_(b"(did you forget to compile extensions?)\n"))
244 elif m in b"zlib".split():
242 elif m in b"zlib".split():
245 ui.error(_(b"(is your Python install correct?)\n"))
243 ui.error(_(b"(is your Python install correct?)\n"))
246 except util.urlerr.httperror as inst:
244 except util.urlerr.httperror as inst:
247 detailed_exit_code = 100
245 detailed_exit_code = 100
248 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
246 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
249 except util.urlerr.urlerror as inst:
247 except util.urlerr.urlerror as inst:
250 detailed_exit_code = 100
248 detailed_exit_code = 100
251 try: # usually it is in the form (errno, strerror)
249 try: # usually it is in the form (errno, strerror)
252 reason = inst.reason.args[1]
250 reason = inst.reason.args[1]
253 except (AttributeError, IndexError):
251 except (AttributeError, IndexError):
254 # it might be anything, for example a string
252 # it might be anything, for example a string
255 reason = inst.reason
253 reason = inst.reason
256 if isinstance(reason, pycompat.unicode):
254 if isinstance(reason, pycompat.unicode):
257 # SSLError of Python 2.7.9 contains a unicode
255 # SSLError of Python 2.7.9 contains a unicode
258 reason = encoding.unitolocal(reason)
256 reason = encoding.unitolocal(reason)
259 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
257 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
260 except (IOError, OSError) as inst:
258 except (IOError, OSError) as inst:
261 if (
259 if (
262 util.safehasattr(inst, b"args")
260 util.safehasattr(inst, b"args")
263 and inst.args
261 and inst.args
264 and inst.args[0] == errno.EPIPE
262 and inst.args[0] == errno.EPIPE
265 ):
263 ):
266 pass
264 pass
267 elif getattr(inst, "strerror", None): # common IOError or OSError
265 elif getattr(inst, "strerror", None): # common IOError or OSError
268 if getattr(inst, "filename", None) is not None:
266 if getattr(inst, "filename", None) is not None:
269 ui.error(
267 ui.error(
270 _(b"abort: %s: '%s'\n")
268 _(b"abort: %s: '%s'\n")
271 % (
269 % (
272 encoding.strtolocal(inst.strerror),
270 encoding.strtolocal(inst.strerror),
273 stringutil.forcebytestr(inst.filename),
271 stringutil.forcebytestr(inst.filename),
274 )
272 )
275 )
273 )
276 else:
274 else:
277 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
275 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
278 else: # suspicious IOError
276 else: # suspicious IOError
279 raise
277 raise
280 except MemoryError:
278 except MemoryError:
281 ui.error(_(b"abort: out of memory\n"))
279 ui.error(_(b"abort: out of memory\n"))
282 except SystemExit as inst:
280 except SystemExit as inst:
283 # Commands shouldn't sys.exit directly, but give a return code.
281 # Commands shouldn't sys.exit directly, but give a return code.
284 # Just in case catch this and and pass exit code to caller.
282 # Just in case catch this and and pass exit code to caller.
285 detailed_exit_code = 254
283 detailed_exit_code = 254
286 coarse_exit_code = inst.code
284 coarse_exit_code = inst.code
287
285
288 if ui.configbool(b'ui', b'detailed-exit-code'):
286 if ui.configbool(b'ui', b'detailed-exit-code'):
289 return detailed_exit_code
287 return detailed_exit_code
290 else:
288 else:
291 return coarse_exit_code
289 return coarse_exit_code
292
290
293
291
294 def checknewlabel(repo, lbl, kind):
292 def checknewlabel(repo, lbl, kind):
295 # Do not use the "kind" parameter in ui output.
293 # Do not use the "kind" parameter in ui output.
296 # It makes strings difficult to translate.
294 # It makes strings difficult to translate.
297 if lbl in [b'tip', b'.', b'null']:
295 if lbl in [b'tip', b'.', b'null']:
298 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
296 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
299 for c in (b':', b'\0', b'\n', b'\r'):
297 for c in (b':', b'\0', b'\n', b'\r'):
300 if c in lbl:
298 if c in lbl:
301 raise error.InputError(
299 raise error.InputError(
302 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
300 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
303 )
301 )
304 try:
302 try:
305 int(lbl)
303 int(lbl)
306 raise error.InputError(_(b"cannot use an integer as a name"))
304 raise error.InputError(_(b"cannot use an integer as a name"))
307 except ValueError:
305 except ValueError:
308 pass
306 pass
309 if lbl.strip() != lbl:
307 if lbl.strip() != lbl:
310 raise error.InputError(
308 raise error.InputError(
311 _(b"leading or trailing whitespace in name %r") % lbl
309 _(b"leading or trailing whitespace in name %r") % lbl
312 )
310 )
313
311
314
312
315 def checkfilename(f):
313 def checkfilename(f):
316 '''Check that the filename f is an acceptable filename for a tracked file'''
314 '''Check that the filename f is an acceptable filename for a tracked file'''
317 if b'\r' in f or b'\n' in f:
315 if b'\r' in f or b'\n' in f:
318 raise error.InputError(
316 raise error.InputError(
319 _(b"'\\n' and '\\r' disallowed in filenames: %r")
317 _(b"'\\n' and '\\r' disallowed in filenames: %r")
320 % pycompat.bytestr(f)
318 % pycompat.bytestr(f)
321 )
319 )
322
320
323
321
324 def checkportable(ui, f):
322 def checkportable(ui, f):
325 '''Check if filename f is portable and warn or abort depending on config'''
323 '''Check if filename f is portable and warn or abort depending on config'''
326 checkfilename(f)
324 checkfilename(f)
327 abort, warn = checkportabilityalert(ui)
325 abort, warn = checkportabilityalert(ui)
328 if abort or warn:
326 if abort or warn:
329 msg = util.checkwinfilename(f)
327 msg = util.checkwinfilename(f)
330 if msg:
328 if msg:
331 msg = b"%s: %s" % (msg, procutil.shellquote(f))
329 msg = b"%s: %s" % (msg, procutil.shellquote(f))
332 if abort:
330 if abort:
333 raise error.InputError(msg)
331 raise error.InputError(msg)
334 ui.warn(_(b"warning: %s\n") % msg)
332 ui.warn(_(b"warning: %s\n") % msg)
335
333
336
334
337 def checkportabilityalert(ui):
335 def checkportabilityalert(ui):
338 '''check if the user's config requests nothing, a warning, or abort for
336 '''check if the user's config requests nothing, a warning, or abort for
339 non-portable filenames'''
337 non-portable filenames'''
340 val = ui.config(b'ui', b'portablefilenames')
338 val = ui.config(b'ui', b'portablefilenames')
341 lval = val.lower()
339 lval = val.lower()
342 bval = stringutil.parsebool(val)
340 bval = stringutil.parsebool(val)
343 abort = pycompat.iswindows or lval == b'abort'
341 abort = pycompat.iswindows or lval == b'abort'
344 warn = bval or lval == b'warn'
342 warn = bval or lval == b'warn'
345 if bval is None and not (warn or abort or lval == b'ignore'):
343 if bval is None and not (warn or abort or lval == b'ignore'):
346 raise error.ConfigError(
344 raise error.ConfigError(
347 _(b"ui.portablefilenames value is invalid ('%s')") % val
345 _(b"ui.portablefilenames value is invalid ('%s')") % val
348 )
346 )
349 return abort, warn
347 return abort, warn
350
348
351
349
352 class casecollisionauditor(object):
350 class casecollisionauditor(object):
353 def __init__(self, ui, abort, dirstate):
351 def __init__(self, ui, abort, dirstate):
354 self._ui = ui
352 self._ui = ui
355 self._abort = abort
353 self._abort = abort
356 allfiles = b'\0'.join(dirstate)
354 allfiles = b'\0'.join(dirstate)
357 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
355 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
358 self._dirstate = dirstate
356 self._dirstate = dirstate
359 # The purpose of _newfiles is so that we don't complain about
357 # The purpose of _newfiles is so that we don't complain about
360 # case collisions if someone were to call this object with the
358 # case collisions if someone were to call this object with the
361 # same filename twice.
359 # same filename twice.
362 self._newfiles = set()
360 self._newfiles = set()
363
361
364 def __call__(self, f):
362 def __call__(self, f):
365 if f in self._newfiles:
363 if f in self._newfiles:
366 return
364 return
367 fl = encoding.lower(f)
365 fl = encoding.lower(f)
368 if fl in self._loweredfiles and f not in self._dirstate:
366 if fl in self._loweredfiles and f not in self._dirstate:
369 msg = _(b'possible case-folding collision for %s') % f
367 msg = _(b'possible case-folding collision for %s') % f
370 if self._abort:
368 if self._abort:
371 raise error.Abort(msg)
369 raise error.Abort(msg)
372 self._ui.warn(_(b"warning: %s\n") % msg)
370 self._ui.warn(_(b"warning: %s\n") % msg)
373 self._loweredfiles.add(fl)
371 self._loweredfiles.add(fl)
374 self._newfiles.add(f)
372 self._newfiles.add(f)
375
373
376
374
377 def filteredhash(repo, maxrev):
375 def filteredhash(repo, maxrev):
378 """build hash of filtered revisions in the current repoview.
376 """build hash of filtered revisions in the current repoview.
379
377
380 Multiple caches perform up-to-date validation by checking that the
378 Multiple caches perform up-to-date validation by checking that the
381 tiprev and tipnode stored in the cache file match the current repository.
379 tiprev and tipnode stored in the cache file match the current repository.
382 However, this is not sufficient for validating repoviews because the set
380 However, this is not sufficient for validating repoviews because the set
383 of revisions in the view may change without the repository tiprev and
381 of revisions in the view may change without the repository tiprev and
384 tipnode changing.
382 tipnode changing.
385
383
386 This function hashes all the revs filtered from the view and returns
384 This function hashes all the revs filtered from the view and returns
387 that SHA-1 digest.
385 that SHA-1 digest.
388 """
386 """
389 cl = repo.changelog
387 cl = repo.changelog
390 if not cl.filteredrevs:
388 if not cl.filteredrevs:
391 return None
389 return None
392 key = cl._filteredrevs_hashcache.get(maxrev)
390 key = cl._filteredrevs_hashcache.get(maxrev)
393 if not key:
391 if not key:
394 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
392 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
395 if revs:
393 if revs:
396 s = hashutil.sha1()
394 s = hashutil.sha1()
397 for rev in revs:
395 for rev in revs:
398 s.update(b'%d;' % rev)
396 s.update(b'%d;' % rev)
399 key = s.digest()
397 key = s.digest()
400 cl._filteredrevs_hashcache[maxrev] = key
398 cl._filteredrevs_hashcache[maxrev] = key
401 return key
399 return key
402
400
403
401
404 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
402 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
405 '''yield every hg repository under path, always recursively.
403 '''yield every hg repository under path, always recursively.
406 The recurse flag will only control recursion into repo working dirs'''
404 The recurse flag will only control recursion into repo working dirs'''
407
405
408 def errhandler(err):
406 def errhandler(err):
409 if err.filename == path:
407 if err.filename == path:
410 raise err
408 raise err
411
409
412 samestat = getattr(os.path, 'samestat', None)
410 samestat = getattr(os.path, 'samestat', None)
413 if followsym and samestat is not None:
411 if followsym and samestat is not None:
414
412
415 def adddir(dirlst, dirname):
413 def adddir(dirlst, dirname):
416 dirstat = os.stat(dirname)
414 dirstat = os.stat(dirname)
417 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
415 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
418 if not match:
416 if not match:
419 dirlst.append(dirstat)
417 dirlst.append(dirstat)
420 return not match
418 return not match
421
419
422 else:
420 else:
423 followsym = False
421 followsym = False
424
422
425 if (seen_dirs is None) and followsym:
423 if (seen_dirs is None) and followsym:
426 seen_dirs = []
424 seen_dirs = []
427 adddir(seen_dirs, path)
425 adddir(seen_dirs, path)
428 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
426 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
429 dirs.sort()
427 dirs.sort()
430 if b'.hg' in dirs:
428 if b'.hg' in dirs:
431 yield root # found a repository
429 yield root # found a repository
432 qroot = os.path.join(root, b'.hg', b'patches')
430 qroot = os.path.join(root, b'.hg', b'patches')
433 if os.path.isdir(os.path.join(qroot, b'.hg')):
431 if os.path.isdir(os.path.join(qroot, b'.hg')):
434 yield qroot # we have a patch queue repo here
432 yield qroot # we have a patch queue repo here
435 if recurse:
433 if recurse:
436 # avoid recursing inside the .hg directory
434 # avoid recursing inside the .hg directory
437 dirs.remove(b'.hg')
435 dirs.remove(b'.hg')
438 else:
436 else:
439 dirs[:] = [] # don't descend further
437 dirs[:] = [] # don't descend further
440 elif followsym:
438 elif followsym:
441 newdirs = []
439 newdirs = []
442 for d in dirs:
440 for d in dirs:
443 fname = os.path.join(root, d)
441 fname = os.path.join(root, d)
444 if adddir(seen_dirs, fname):
442 if adddir(seen_dirs, fname):
445 if os.path.islink(fname):
443 if os.path.islink(fname):
446 for hgname in walkrepos(fname, True, seen_dirs):
444 for hgname in walkrepos(fname, True, seen_dirs):
447 yield hgname
445 yield hgname
448 else:
446 else:
449 newdirs.append(d)
447 newdirs.append(d)
450 dirs[:] = newdirs
448 dirs[:] = newdirs
451
449
452
450
453 def binnode(ctx):
451 def binnode(ctx):
454 """Return binary node id for a given basectx"""
452 """Return binary node id for a given basectx"""
455 node = ctx.node()
453 node = ctx.node()
456 if node is None:
454 if node is None:
457 return wdirid
455 return wdirid
458 return node
456 return node
459
457
460
458
461 def intrev(ctx):
459 def intrev(ctx):
462 """Return integer for a given basectx that can be used in comparison or
460 """Return integer for a given basectx that can be used in comparison or
463 arithmetic operation"""
461 arithmetic operation"""
464 rev = ctx.rev()
462 rev = ctx.rev()
465 if rev is None:
463 if rev is None:
466 return wdirrev
464 return wdirrev
467 return rev
465 return rev
468
466
469
467
470 def formatchangeid(ctx):
468 def formatchangeid(ctx):
471 """Format changectx as '{rev}:{node|formatnode}', which is the default
469 """Format changectx as '{rev}:{node|formatnode}', which is the default
472 template provided by logcmdutil.changesettemplater"""
470 template provided by logcmdutil.changesettemplater"""
473 repo = ctx.repo()
471 repo = ctx.repo()
474 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
472 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
475
473
476
474
477 def formatrevnode(ui, rev, node):
475 def formatrevnode(ui, rev, node):
478 """Format given revision and node depending on the current verbosity"""
476 """Format given revision and node depending on the current verbosity"""
479 if ui.debugflag:
477 if ui.debugflag:
480 hexfunc = hex
478 hexfunc = hex
481 else:
479 else:
482 hexfunc = short
480 hexfunc = short
483 return b'%d:%s' % (rev, hexfunc(node))
481 return b'%d:%s' % (rev, hexfunc(node))
484
482
485
483
486 def resolvehexnodeidprefix(repo, prefix):
484 def resolvehexnodeidprefix(repo, prefix):
487 if prefix.startswith(b'x'):
485 if prefix.startswith(b'x'):
488 prefix = prefix[1:]
486 prefix = prefix[1:]
489 try:
487 try:
490 # Uses unfiltered repo because it's faster when prefix is ambiguous/
488 # Uses unfiltered repo because it's faster when prefix is ambiguous/
491 # This matches the shortesthexnodeidprefix() function below.
489 # This matches the shortesthexnodeidprefix() function below.
492 node = repo.unfiltered().changelog._partialmatch(prefix)
490 node = repo.unfiltered().changelog._partialmatch(prefix)
493 except error.AmbiguousPrefixLookupError:
491 except error.AmbiguousPrefixLookupError:
494 revset = repo.ui.config(
492 revset = repo.ui.config(
495 b'experimental', b'revisions.disambiguatewithin'
493 b'experimental', b'revisions.disambiguatewithin'
496 )
494 )
497 if revset:
495 if revset:
498 # Clear config to avoid infinite recursion
496 # Clear config to avoid infinite recursion
499 configoverrides = {
497 configoverrides = {
500 (b'experimental', b'revisions.disambiguatewithin'): None
498 (b'experimental', b'revisions.disambiguatewithin'): None
501 }
499 }
502 with repo.ui.configoverride(configoverrides):
500 with repo.ui.configoverride(configoverrides):
503 revs = repo.anyrevs([revset], user=True)
501 revs = repo.anyrevs([revset], user=True)
504 matches = []
502 matches = []
505 for rev in revs:
503 for rev in revs:
506 node = repo.changelog.node(rev)
504 node = repo.changelog.node(rev)
507 if hex(node).startswith(prefix):
505 if hex(node).startswith(prefix):
508 matches.append(node)
506 matches.append(node)
509 if len(matches) == 1:
507 if len(matches) == 1:
510 return matches[0]
508 return matches[0]
511 raise
509 raise
512 if node is None:
510 if node is None:
513 return
511 return
514 repo.changelog.rev(node) # make sure node isn't filtered
512 repo.changelog.rev(node) # make sure node isn't filtered
515 return node
513 return node
516
514
517
515
518 def mayberevnum(repo, prefix):
516 def mayberevnum(repo, prefix):
519 """Checks if the given prefix may be mistaken for a revision number"""
517 """Checks if the given prefix may be mistaken for a revision number"""
520 try:
518 try:
521 i = int(prefix)
519 i = int(prefix)
522 # if we are a pure int, then starting with zero will not be
520 # if we are a pure int, then starting with zero will not be
523 # confused as a rev; or, obviously, if the int is larger
521 # confused as a rev; or, obviously, if the int is larger
524 # than the value of the tip rev. We still need to disambiguate if
522 # than the value of the tip rev. We still need to disambiguate if
525 # prefix == '0', since that *is* a valid revnum.
523 # prefix == '0', since that *is* a valid revnum.
526 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
524 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
527 return False
525 return False
528 return True
526 return True
529 except ValueError:
527 except ValueError:
530 return False
528 return False
531
529
532
530
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # prefix the hex form with 'x' instead of extending it, so a
            # numeric-looking prefix cannot be read as a revnum
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # otherwise extend the prefix until it can no longer be mistaken
        # for a revision number
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        # disambiguate only against the configured subset of revisions
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapped need to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                # fast path: the native nodetree computes the shortest
                # unambiguous length directly
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # slow path: linear scan over the subset for each length
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
605
603
606
604
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    # any other exception (e.g. an ambiguous prefix) propagates to the caller
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
618
616
619
617
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        # well-known symbols are resolved directly by the repo
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        # next, try to read the symbol as a revision number
        try:
            r = int(symbol)
            # reject forms like '01' or '+1' that do not round-trip
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                # negative numbers count back from tip
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            # a filtered revnum must surface as a filtered error below
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # a full 40-hex string may be a binary nodeid
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try the symbol as a short hex nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # the working directory is represented by the None changeid
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # rewrap filtered errors with a user-oriented message/hint
        raise _filterederror(repo, symbol)
685
683
686
684
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith(b'visible'):
        # not a visibility filter: report which subset excluded the rev
        msg = _(b"filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The changeset is hidden. Check whether obsolescence caused that, so
    # the message can explain why the changeset is not visible.
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _(b"hidden revision '%s'") % changeid

    hint = _(b'use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
711
709
712
710
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve a single revspec, falling back to ``default`` when empty.

    Note that 0 is a valid (falsy) revspec and is not treated as empty.
    """
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec], localalias=localalias)
    if not resolved:
        raise error.Abort(_(b'empty revision set'))
    return repo[resolved.last()]
721
719
722
720
def _pairspec(revspec):
    """Report whether the top-level operator of ``revspec`` is a range."""
    rangeops = (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangeops
731
729
732
730
def revpair(repo, revs):
    """Resolve ``revs`` into a pair of contexts."""
    if not revs:
        # default to (working directory parent, working directory)
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        raise error.Abort(_(b'empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        # multiple specs collapsed to one rev because one of them resolved
        # to the empty set
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
757
755
758
756
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # integers are wrapped as revnum revsets; everything else is passed
    # through untouched
    allspecs = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
786
784
787
785
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double from ``windowsize`` while below
    ``sizelimit``, then repeat the last size forever."""
    size = windowsize
    # growth phase: emit and double while under the limit
    while size < sizelimit:
        yield size
        size *= 2
    # saturation phase: the size stops growing
    while True:
        yield size
793
791
794
792
def walkchangerevs(repo, revs, makefilematcher, prepare):
    '''Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # fill the current window with up to `windowsize` revs
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    # input exhausted; finish this (partial) window then stop
                    stopiteration = True
                    break
                nrevs.append(rev)
            # prepare() is called in ascending rev order within the window...
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            # ...but contexts are yielded in the caller-requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
835
833
836
834
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        # a merge: both parents always matter
        return ps
    if repo.ui.debugflag:
        # in debug mode, always show the (null-padded) parent pair
        return [ps[0], repo[nullrev]]
    if ps[0].rev() >= intrev(ctx) - 1:
        # the parent immediately precedes this revision, so it is implied
        return []
    return ps
852
850
853
851
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        cfgvalue = repo.ui.config(b'ui', b'relative-paths')
        if cfgvalue == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(cfgvalue)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % cfgvalue
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would also work for cwd == b'' (i.e. cwd is the
            # repo root), but its generality makes the returned function
            # slower, so the repo-root case falls through below
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        # paths are already slash-separated repo-relative names
        return lambda f: f
    return util.localpath
892
890
893
891
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def subrelpathfn(f):
        # prepend the subrepo path before delegating to the outer function
        return uipathfn(posixpath.join(subpath, f))

    return subrelpathfn
897
895
898
896
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
906
904
907
905
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            # an unparsable pattern is treated as a literal name
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # nothing matched: keep the original pattern
            expanded.append(kindpat)
    return expanded
926
924
927
925
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def warnbad(f, msg):
        # default handler: report bad files on the ui instead of raising
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = warnbad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        # a match-everything matcher means no effective patterns
        pats = []
    return m, pats
959
957
960
958
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    # delegate to matchandpats() and discard the returned pattern list
    matcher, discardedpats = matchandpats(
        ctx, pats, opts, globbed, default, badfn=badfn
    )
    return matcher
966
964
967
965
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # `repo` is unused; it is accepted for symmetry with the other
    # matcher factories in this module
    return matchmod.always()
971
969
972
970
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # `repo` is unused; it is accepted for symmetry with match()/matchall()
    return matchmod.exact(files, badfn=badfn)
976
974
977
975
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises error.ParseError (with ``msg``) unless the pattern resolves to
    exactly one file in revision ``rev``.
    """
    if not matchmod.patkind(pat):
        # a plain path: normalize it against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # a real pattern: it must select exactly one file in the revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
991
989
992
990
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    configuredpath = ui.config(b'ui', b'origbackuppath')
    if configuredpath:
        # the configured path is interpreted relative to the working dir
        return vfs.vfs(repo.wvfs.join(configuredpath))
    return None
1001
999
1002
1000
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # no [ui] origbackuppath configured: back up next to the file itself
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                # only the first (deepest) conflicting ancestor can exist
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # a directory occupies the backup file's own path: clear it
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
1037
1035
1038
1036
1039 class _containsnode(object):
1037 class _containsnode(object):
1040 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1038 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1041
1039
1042 def __init__(self, repo, revcontainer):
1040 def __init__(self, repo, revcontainer):
1043 self._torev = repo.changelog.rev
1041 self._torev = repo.changelog.rev
1044 self._revcontains = revcontainer.__contains__
1042 self._revcontains = revcontainer.__contains__
1045
1043
1046 def __contains__(self, node):
1044 def __contains__(self, node):
1047 return self._revcontains(self._torev(node))
1045 return self._revcontains(self._torev(node))
1048
1046
1049
1047
1050 def cleanupnodes(
1048 def cleanupnodes(
1051 repo,
1049 repo,
1052 replacements,
1050 replacements,
1053 operation,
1051 operation,
1054 moves=None,
1052 moves=None,
1055 metadata=None,
1053 metadata=None,
1056 fixphase=False,
1054 fixphase=False,
1057 targetphase=None,
1055 targetphase=None,
1058 backup=True,
1056 backup=True,
1059 ):
1057 ):
1060 """do common cleanups when old nodes are replaced by new nodes
1058 """do common cleanups when old nodes are replaced by new nodes
1061
1059
1062 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1060 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1063 (we might also want to move working directory parent in the future)
1061 (we might also want to move working directory parent in the future)
1064
1062
1065 By default, bookmark moves are calculated automatically from 'replacements',
1063 By default, bookmark moves are calculated automatically from 'replacements',
1066 but 'moves' can be used to override that. Also, 'moves' may include
1064 but 'moves' can be used to override that. Also, 'moves' may include
1067 additional bookmark moves that should not have associated obsmarkers.
1065 additional bookmark moves that should not have associated obsmarkers.
1068
1066
1069 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
1067 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
1070 have replacements. operation is a string, like "rebase".
1068 have replacements. operation is a string, like "rebase".
1071
1069
1072 metadata is dictionary containing metadata to be stored in obsmarker if
1070 metadata is dictionary containing metadata to be stored in obsmarker if
1073 obsolescence is enabled.
1071 obsolescence is enabled.
1074 """
1072 """
1075 assert fixphase or targetphase is None
1073 assert fixphase or targetphase is None
1076 if not replacements and not moves:
1074 if not replacements and not moves:
1077 return
1075 return
1078
1076
1079 # translate mapping's other forms
1077 # translate mapping's other forms
1080 if not util.safehasattr(replacements, b'items'):
1078 if not util.safehasattr(replacements, b'items'):
1081 replacements = {(n,): () for n in replacements}
1079 replacements = {(n,): () for n in replacements}
1082 else:
1080 else:
1083 # upgrading non tuple "source" to tuple ones for BC
1081 # upgrading non tuple "source" to tuple ones for BC
1084 repls = {}
1082 repls = {}
1085 for key, value in replacements.items():
1083 for key, value in replacements.items():
1086 if not isinstance(key, tuple):
1084 if not isinstance(key, tuple):
1087 key = (key,)
1085 key = (key,)
1088 repls[key] = value
1086 repls[key] = value
1089 replacements = repls
1087 replacements = repls
1090
1088
1091 # Unfiltered repo is needed since nodes in replacements might be hidden.
1089 # Unfiltered repo is needed since nodes in replacements might be hidden.
1092 unfi = repo.unfiltered()
1090 unfi = repo.unfiltered()
1093
1091
1094 # Calculate bookmark movements
1092 # Calculate bookmark movements
1095 if moves is None:
1093 if moves is None:
1096 moves = {}
1094 moves = {}
1097 for oldnodes, newnodes in replacements.items():
1095 for oldnodes, newnodes in replacements.items():
1098 for oldnode in oldnodes:
1096 for oldnode in oldnodes:
1099 if oldnode in moves:
1097 if oldnode in moves:
1100 continue
1098 continue
1101 if len(newnodes) > 1:
1099 if len(newnodes) > 1:
1102 # usually a split, take the one with biggest rev number
1100 # usually a split, take the one with biggest rev number
1103 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1101 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1104 elif len(newnodes) == 0:
1102 elif len(newnodes) == 0:
1105 # move bookmark backwards
1103 # move bookmark backwards
1106 allreplaced = []
1104 allreplaced = []
1107 for rep in replacements:
1105 for rep in replacements:
1108 allreplaced.extend(rep)
1106 allreplaced.extend(rep)
1109 roots = list(
1107 roots = list(
1110 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1108 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1111 )
1109 )
1112 if roots:
1110 if roots:
1113 newnode = roots[0].node()
1111 newnode = roots[0].node()
1114 else:
1112 else:
1115 newnode = nullid
1113 newnode = nullid
1116 else:
1114 else:
1117 newnode = newnodes[0]
1115 newnode = newnodes[0]
1118 moves[oldnode] = newnode
1116 moves[oldnode] = newnode
1119
1117
1120 allnewnodes = [n for ns in replacements.values() for n in ns]
1118 allnewnodes = [n for ns in replacements.values() for n in ns]
1121 toretract = {}
1119 toretract = {}
1122 toadvance = {}
1120 toadvance = {}
1123 if fixphase:
1121 if fixphase:
1124 precursors = {}
1122 precursors = {}
1125 for oldnodes, newnodes in replacements.items():
1123 for oldnodes, newnodes in replacements.items():
1126 for oldnode in oldnodes:
1124 for oldnode in oldnodes:
1127 for newnode in newnodes:
1125 for newnode in newnodes:
1128 precursors.setdefault(newnode, []).append(oldnode)
1126 precursors.setdefault(newnode, []).append(oldnode)
1129
1127
1130 allnewnodes.sort(key=lambda n: unfi[n].rev())
1128 allnewnodes.sort(key=lambda n: unfi[n].rev())
1131 newphases = {}
1129 newphases = {}
1132
1130
1133 def phase(ctx):
1131 def phase(ctx):
1134 return newphases.get(ctx.node(), ctx.phase())
1132 return newphases.get(ctx.node(), ctx.phase())
1135
1133
1136 for newnode in allnewnodes:
1134 for newnode in allnewnodes:
1137 ctx = unfi[newnode]
1135 ctx = unfi[newnode]
1138 parentphase = max(phase(p) for p in ctx.parents())
1136 parentphase = max(phase(p) for p in ctx.parents())
1139 if targetphase is None:
1137 if targetphase is None:
1140 oldphase = max(
1138 oldphase = max(
1141 unfi[oldnode].phase() for oldnode in precursors[newnode]
1139 unfi[oldnode].phase() for oldnode in precursors[newnode]
1142 )
1140 )
1143 newphase = max(oldphase, parentphase)
1141 newphase = max(oldphase, parentphase)
1144 else:
1142 else:
1145 newphase = max(targetphase, parentphase)
1143 newphase = max(targetphase, parentphase)
1146 newphases[newnode] = newphase
1144 newphases[newnode] = newphase
1147 if newphase > ctx.phase():
1145 if newphase > ctx.phase():
1148 toretract.setdefault(newphase, []).append(newnode)
1146 toretract.setdefault(newphase, []).append(newnode)
1149 elif newphase < ctx.phase():
1147 elif newphase < ctx.phase():
1150 toadvance.setdefault(newphase, []).append(newnode)
1148 toadvance.setdefault(newphase, []).append(newnode)
1151
1149
1152 with repo.transaction(b'cleanup') as tr:
1150 with repo.transaction(b'cleanup') as tr:
1153 # Move bookmarks
1151 # Move bookmarks
1154 bmarks = repo._bookmarks
1152 bmarks = repo._bookmarks
1155 bmarkchanges = []
1153 bmarkchanges = []
1156 for oldnode, newnode in moves.items():
1154 for oldnode, newnode in moves.items():
1157 oldbmarks = repo.nodebookmarks(oldnode)
1155 oldbmarks = repo.nodebookmarks(oldnode)
1158 if not oldbmarks:
1156 if not oldbmarks:
1159 continue
1157 continue
1160 from . import bookmarks # avoid import cycle
1158 from . import bookmarks # avoid import cycle
1161
1159
1162 repo.ui.debug(
1160 repo.ui.debug(
1163 b'moving bookmarks %r from %s to %s\n'
1161 b'moving bookmarks %r from %s to %s\n'
1164 % (
1162 % (
1165 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1163 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1166 hex(oldnode),
1164 hex(oldnode),
1167 hex(newnode),
1165 hex(newnode),
1168 )
1166 )
1169 )
1167 )
1170 # Delete divergent bookmarks being parents of related newnodes
1168 # Delete divergent bookmarks being parents of related newnodes
1171 deleterevs = repo.revs(
1169 deleterevs = repo.revs(
1172 b'parents(roots(%ln & (::%n))) - parents(%n)',
1170 b'parents(roots(%ln & (::%n))) - parents(%n)',
1173 allnewnodes,
1171 allnewnodes,
1174 newnode,
1172 newnode,
1175 oldnode,
1173 oldnode,
1176 )
1174 )
1177 deletenodes = _containsnode(repo, deleterevs)
1175 deletenodes = _containsnode(repo, deleterevs)
1178 for name in oldbmarks:
1176 for name in oldbmarks:
1179 bmarkchanges.append((name, newnode))
1177 bmarkchanges.append((name, newnode))
1180 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1178 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1181 bmarkchanges.append((b, None))
1179 bmarkchanges.append((b, None))
1182
1180
1183 if bmarkchanges:
1181 if bmarkchanges:
1184 bmarks.applychanges(repo, tr, bmarkchanges)
1182 bmarks.applychanges(repo, tr, bmarkchanges)
1185
1183
1186 for phase, nodes in toretract.items():
1184 for phase, nodes in toretract.items():
1187 phases.retractboundary(repo, tr, phase, nodes)
1185 phases.retractboundary(repo, tr, phase, nodes)
1188 for phase, nodes in toadvance.items():
1186 for phase, nodes in toadvance.items():
1189 phases.advanceboundary(repo, tr, phase, nodes)
1187 phases.advanceboundary(repo, tr, phase, nodes)
1190
1188
1191 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1189 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1192 # Obsolete or strip nodes
1190 # Obsolete or strip nodes
1193 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1191 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1194 # If a node is already obsoleted, and we want to obsolete it
1192 # If a node is already obsoleted, and we want to obsolete it
1195 # without a successor, skip that obssolete request since it's
1193 # without a successor, skip that obssolete request since it's
1196 # unnecessary. That's the "if s or not isobs(n)" check below.
1194 # unnecessary. That's the "if s or not isobs(n)" check below.
1197 # Also sort the node in topology order, that might be useful for
1195 # Also sort the node in topology order, that might be useful for
1198 # some obsstore logic.
1196 # some obsstore logic.
1199 # NOTE: the sorting might belong to createmarkers.
1197 # NOTE: the sorting might belong to createmarkers.
1200 torev = unfi.changelog.rev
1198 torev = unfi.changelog.rev
1201 sortfunc = lambda ns: torev(ns[0][0])
1199 sortfunc = lambda ns: torev(ns[0][0])
1202 rels = []
1200 rels = []
1203 for ns, s in sorted(replacements.items(), key=sortfunc):
1201 for ns, s in sorted(replacements.items(), key=sortfunc):
1204 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1202 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1205 rels.append(rel)
1203 rels.append(rel)
1206 if rels:
1204 if rels:
1207 obsolete.createmarkers(
1205 obsolete.createmarkers(
1208 repo, rels, operation=operation, metadata=metadata
1206 repo, rels, operation=operation, metadata=metadata
1209 )
1207 )
1210 elif phases.supportinternal(repo) and mayusearchived:
1208 elif phases.supportinternal(repo) and mayusearchived:
1211 # this assume we do not have "unstable" nodes above the cleaned ones
1209 # this assume we do not have "unstable" nodes above the cleaned ones
1212 allreplaced = set()
1210 allreplaced = set()
1213 for ns in replacements.keys():
1211 for ns in replacements.keys():
1214 allreplaced.update(ns)
1212 allreplaced.update(ns)
1215 if backup:
1213 if backup:
1216 from . import repair # avoid import cycle
1214 from . import repair # avoid import cycle
1217
1215
1218 node = min(allreplaced, key=repo.changelog.rev)
1216 node = min(allreplaced, key=repo.changelog.rev)
1219 repair.backupbundle(
1217 repair.backupbundle(
1220 repo, allreplaced, allreplaced, node, operation
1218 repo, allreplaced, allreplaced, node, operation
1221 )
1219 )
1222 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1220 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1223 else:
1221 else:
1224 from . import repair # avoid import cycle
1222 from . import repair # avoid import cycle
1225
1223
1226 tostrip = list(n for ns in replacements for n in ns)
1224 tostrip = list(n for ns in replacements for n in ns)
1227 if tostrip:
1225 if tostrip:
1228 repair.delayedstrip(
1226 repair.delayedstrip(
1229 repo.ui, repo, tostrip, operation, backup=backup
1227 repo.ui, repo, tostrip, operation, backup=backup
1230 )
1228 )
1231
1229
1232
1230
1233 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1231 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1234 if opts is None:
1232 if opts is None:
1235 opts = {}
1233 opts = {}
1236 m = matcher
1234 m = matcher
1237 dry_run = opts.get(b'dry_run')
1235 dry_run = opts.get(b'dry_run')
1238 try:
1236 try:
1239 similarity = float(opts.get(b'similarity') or 0)
1237 similarity = float(opts.get(b'similarity') or 0)
1240 except ValueError:
1238 except ValueError:
1241 raise error.Abort(_(b'similarity must be a number'))
1239 raise error.Abort(_(b'similarity must be a number'))
1242 if similarity < 0 or similarity > 100:
1240 if similarity < 0 or similarity > 100:
1243 raise error.Abort(_(b'similarity must be between 0 and 100'))
1241 raise error.Abort(_(b'similarity must be between 0 and 100'))
1244 similarity /= 100.0
1242 similarity /= 100.0
1245
1243
1246 ret = 0
1244 ret = 0
1247
1245
1248 wctx = repo[None]
1246 wctx = repo[None]
1249 for subpath in sorted(wctx.substate):
1247 for subpath in sorted(wctx.substate):
1250 submatch = matchmod.subdirmatcher(subpath, m)
1248 submatch = matchmod.subdirmatcher(subpath, m)
1251 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1249 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1252 sub = wctx.sub(subpath)
1250 sub = wctx.sub(subpath)
1253 subprefix = repo.wvfs.reljoin(prefix, subpath)
1251 subprefix = repo.wvfs.reljoin(prefix, subpath)
1254 subuipathfn = subdiruipathfn(subpath, uipathfn)
1252 subuipathfn = subdiruipathfn(subpath, uipathfn)
1255 try:
1253 try:
1256 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1254 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1257 ret = 1
1255 ret = 1
1258 except error.LookupError:
1256 except error.LookupError:
1259 repo.ui.status(
1257 repo.ui.status(
1260 _(b"skipping missing subrepository: %s\n")
1258 _(b"skipping missing subrepository: %s\n")
1261 % uipathfn(subpath)
1259 % uipathfn(subpath)
1262 )
1260 )
1263
1261
1264 rejected = []
1262 rejected = []
1265
1263
1266 def badfn(f, msg):
1264 def badfn(f, msg):
1267 if f in m.files():
1265 if f in m.files():
1268 m.bad(f, msg)
1266 m.bad(f, msg)
1269 rejected.append(f)
1267 rejected.append(f)
1270
1268
1271 badmatch = matchmod.badmatch(m, badfn)
1269 badmatch = matchmod.badmatch(m, badfn)
1272 added, unknown, deleted, removed, forgotten = _interestingfiles(
1270 added, unknown, deleted, removed, forgotten = _interestingfiles(
1273 repo, badmatch
1271 repo, badmatch
1274 )
1272 )
1275
1273
1276 unknownset = set(unknown + forgotten)
1274 unknownset = set(unknown + forgotten)
1277 toprint = unknownset.copy()
1275 toprint = unknownset.copy()
1278 toprint.update(deleted)
1276 toprint.update(deleted)
1279 for abs in sorted(toprint):
1277 for abs in sorted(toprint):
1280 if repo.ui.verbose or not m.exact(abs):
1278 if repo.ui.verbose or not m.exact(abs):
1281 if abs in unknownset:
1279 if abs in unknownset:
1282 status = _(b'adding %s\n') % uipathfn(abs)
1280 status = _(b'adding %s\n') % uipathfn(abs)
1283 label = b'ui.addremove.added'
1281 label = b'ui.addremove.added'
1284 else:
1282 else:
1285 status = _(b'removing %s\n') % uipathfn(abs)
1283 status = _(b'removing %s\n') % uipathfn(abs)
1286 label = b'ui.addremove.removed'
1284 label = b'ui.addremove.removed'
1287 repo.ui.status(status, label=label)
1285 repo.ui.status(status, label=label)
1288
1286
1289 renames = _findrenames(
1287 renames = _findrenames(
1290 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1288 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1291 )
1289 )
1292
1290
1293 if not dry_run:
1291 if not dry_run:
1294 _markchanges(repo, unknown + forgotten, deleted, renames)
1292 _markchanges(repo, unknown + forgotten, deleted, renames)
1295
1293
1296 for f in rejected:
1294 for f in rejected:
1297 if f in m.files():
1295 if f in m.files():
1298 return 1
1296 return 1
1299 return ret
1297 return ret
1300
1298
1301
1299
1302 def marktouched(repo, files, similarity=0.0):
1300 def marktouched(repo, files, similarity=0.0):
1303 '''Assert that files have somehow been operated upon. files are relative to
1301 '''Assert that files have somehow been operated upon. files are relative to
1304 the repo root.'''
1302 the repo root.'''
1305 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1303 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1306 rejected = []
1304 rejected = []
1307
1305
1308 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1306 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1309
1307
1310 if repo.ui.verbose:
1308 if repo.ui.verbose:
1311 unknownset = set(unknown + forgotten)
1309 unknownset = set(unknown + forgotten)
1312 toprint = unknownset.copy()
1310 toprint = unknownset.copy()
1313 toprint.update(deleted)
1311 toprint.update(deleted)
1314 for abs in sorted(toprint):
1312 for abs in sorted(toprint):
1315 if abs in unknownset:
1313 if abs in unknownset:
1316 status = _(b'adding %s\n') % abs
1314 status = _(b'adding %s\n') % abs
1317 else:
1315 else:
1318 status = _(b'removing %s\n') % abs
1316 status = _(b'removing %s\n') % abs
1319 repo.ui.status(status)
1317 repo.ui.status(status)
1320
1318
1321 # TODO: We should probably have the caller pass in uipathfn and apply it to
1319 # TODO: We should probably have the caller pass in uipathfn and apply it to
1322 # the messages above too. legacyrelativevalue=True is consistent with how
1320 # the messages above too. legacyrelativevalue=True is consistent with how
1323 # it used to work.
1321 # it used to work.
1324 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1322 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1325 renames = _findrenames(
1323 renames = _findrenames(
1326 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1324 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1327 )
1325 )
1328
1326
1329 _markchanges(repo, unknown + forgotten, deleted, renames)
1327 _markchanges(repo, unknown + forgotten, deleted, renames)
1330
1328
1331 for f in rejected:
1329 for f in rejected:
1332 if f in m.files():
1330 if f in m.files():
1333 return 1
1331 return 1
1334 return 0
1332 return 0
1335
1333
1336
1334
1337 def _interestingfiles(repo, matcher):
1335 def _interestingfiles(repo, matcher):
1338 '''Walk dirstate with matcher, looking for files that addremove would care
1336 '''Walk dirstate with matcher, looking for files that addremove would care
1339 about.
1337 about.
1340
1338
1341 This is different from dirstate.status because it doesn't care about
1339 This is different from dirstate.status because it doesn't care about
1342 whether files are modified or clean.'''
1340 whether files are modified or clean.'''
1343 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1341 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1344 audit_path = pathutil.pathauditor(repo.root, cached=True)
1342 audit_path = pathutil.pathauditor(repo.root, cached=True)
1345
1343
1346 ctx = repo[None]
1344 ctx = repo[None]
1347 dirstate = repo.dirstate
1345 dirstate = repo.dirstate
1348 matcher = repo.narrowmatch(matcher, includeexact=True)
1346 matcher = repo.narrowmatch(matcher, includeexact=True)
1349 walkresults = dirstate.walk(
1347 walkresults = dirstate.walk(
1350 matcher,
1348 matcher,
1351 subrepos=sorted(ctx.substate),
1349 subrepos=sorted(ctx.substate),
1352 unknown=True,
1350 unknown=True,
1353 ignored=False,
1351 ignored=False,
1354 full=False,
1352 full=False,
1355 )
1353 )
1356 for abs, st in pycompat.iteritems(walkresults):
1354 for abs, st in pycompat.iteritems(walkresults):
1357 dstate = dirstate[abs]
1355 dstate = dirstate[abs]
1358 if dstate == b'?' and audit_path.check(abs):
1356 if dstate == b'?' and audit_path.check(abs):
1359 unknown.append(abs)
1357 unknown.append(abs)
1360 elif dstate != b'r' and not st:
1358 elif dstate != b'r' and not st:
1361 deleted.append(abs)
1359 deleted.append(abs)
1362 elif dstate == b'r' and st:
1360 elif dstate == b'r' and st:
1363 forgotten.append(abs)
1361 forgotten.append(abs)
1364 # for finding renames
1362 # for finding renames
1365 elif dstate == b'r' and not st:
1363 elif dstate == b'r' and not st:
1366 removed.append(abs)
1364 removed.append(abs)
1367 elif dstate == b'a':
1365 elif dstate == b'a':
1368 added.append(abs)
1366 added.append(abs)
1369
1367
1370 return added, unknown, deleted, removed, forgotten
1368 return added, unknown, deleted, removed, forgotten
1371
1369
1372
1370
1373 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1371 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1374 '''Find renames from removed files to added ones.'''
1372 '''Find renames from removed files to added ones.'''
1375 renames = {}
1373 renames = {}
1376 if similarity > 0:
1374 if similarity > 0:
1377 for old, new, score in similar.findrenames(
1375 for old, new, score in similar.findrenames(
1378 repo, added, removed, similarity
1376 repo, added, removed, similarity
1379 ):
1377 ):
1380 if (
1378 if (
1381 repo.ui.verbose
1379 repo.ui.verbose
1382 or not matcher.exact(old)
1380 or not matcher.exact(old)
1383 or not matcher.exact(new)
1381 or not matcher.exact(new)
1384 ):
1382 ):
1385 repo.ui.status(
1383 repo.ui.status(
1386 _(
1384 _(
1387 b'recording removal of %s as rename to %s '
1385 b'recording removal of %s as rename to %s '
1388 b'(%d%% similar)\n'
1386 b'(%d%% similar)\n'
1389 )
1387 )
1390 % (uipathfn(old), uipathfn(new), score * 100)
1388 % (uipathfn(old), uipathfn(new), score * 100)
1391 )
1389 )
1392 renames[new] = old
1390 renames[new] = old
1393 return renames
1391 return renames
1394
1392
1395
1393
1396 def _markchanges(repo, unknown, deleted, renames):
1394 def _markchanges(repo, unknown, deleted, renames):
1397 '''Marks the files in unknown as added, the files in deleted as removed,
1395 '''Marks the files in unknown as added, the files in deleted as removed,
1398 and the files in renames as copied.'''
1396 and the files in renames as copied.'''
1399 wctx = repo[None]
1397 wctx = repo[None]
1400 with repo.wlock():
1398 with repo.wlock():
1401 wctx.forget(deleted)
1399 wctx.forget(deleted)
1402 wctx.add(unknown)
1400 wctx.add(unknown)
1403 for new, old in pycompat.iteritems(renames):
1401 for new, old in pycompat.iteritems(renames):
1404 wctx.copy(old, new)
1402 wctx.copy(old, new)
1405
1403
1406
1404
1407 def getrenamedfn(repo, endrev=None):
1405 def getrenamedfn(repo, endrev=None):
1408 if copiesmod.usechangesetcentricalgo(repo):
1406 if copiesmod.usechangesetcentricalgo(repo):
1409
1407
1410 def getrenamed(fn, rev):
1408 def getrenamed(fn, rev):
1411 ctx = repo[rev]
1409 ctx = repo[rev]
1412 p1copies = ctx.p1copies()
1410 p1copies = ctx.p1copies()
1413 if fn in p1copies:
1411 if fn in p1copies:
1414 return p1copies[fn]
1412 return p1copies[fn]
1415 p2copies = ctx.p2copies()
1413 p2copies = ctx.p2copies()
1416 if fn in p2copies:
1414 if fn in p2copies:
1417 return p2copies[fn]
1415 return p2copies[fn]
1418 return None
1416 return None
1419
1417
1420 return getrenamed
1418 return getrenamed
1421
1419
1422 rcache = {}
1420 rcache = {}
1423 if endrev is None:
1421 if endrev is None:
1424 endrev = len(repo)
1422 endrev = len(repo)
1425
1423
1426 def getrenamed(fn, rev):
1424 def getrenamed(fn, rev):
1427 '''looks up all renames for a file (up to endrev) the first
1425 '''looks up all renames for a file (up to endrev) the first
1428 time the file is given. It indexes on the changerev and only
1426 time the file is given. It indexes on the changerev and only
1429 parses the manifest if linkrev != changerev.
1427 parses the manifest if linkrev != changerev.
1430 Returns rename info for fn at changerev rev.'''
1428 Returns rename info for fn at changerev rev.'''
1431 if fn not in rcache:
1429 if fn not in rcache:
1432 rcache[fn] = {}
1430 rcache[fn] = {}
1433 fl = repo.file(fn)
1431 fl = repo.file(fn)
1434 for i in fl:
1432 for i in fl:
1435 lr = fl.linkrev(i)
1433 lr = fl.linkrev(i)
1436 renamed = fl.renamed(fl.node(i))
1434 renamed = fl.renamed(fl.node(i))
1437 rcache[fn][lr] = renamed and renamed[0]
1435 rcache[fn][lr] = renamed and renamed[0]
1438 if lr >= endrev:
1436 if lr >= endrev:
1439 break
1437 break
1440 if rev in rcache[fn]:
1438 if rev in rcache[fn]:
1441 return rcache[fn][rev]
1439 return rcache[fn][rev]
1442
1440
1443 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1441 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1444 # filectx logic.
1442 # filectx logic.
1445 try:
1443 try:
1446 return repo[rev][fn].copysource()
1444 return repo[rev][fn].copysource()
1447 except error.LookupError:
1445 except error.LookupError:
1448 return None
1446 return None
1449
1447
1450 return getrenamed
1448 return getrenamed
1451
1449
1452
1450
1453 def getcopiesfn(repo, endrev=None):
1451 def getcopiesfn(repo, endrev=None):
1454 if copiesmod.usechangesetcentricalgo(repo):
1452 if copiesmod.usechangesetcentricalgo(repo):
1455
1453
1456 def copiesfn(ctx):
1454 def copiesfn(ctx):
1457 if ctx.p2copies():
1455 if ctx.p2copies():
1458 allcopies = ctx.p1copies().copy()
1456 allcopies = ctx.p1copies().copy()
1459 # There should be no overlap
1457 # There should be no overlap
1460 allcopies.update(ctx.p2copies())
1458 allcopies.update(ctx.p2copies())
1461 return sorted(allcopies.items())
1459 return sorted(allcopies.items())
1462 else:
1460 else:
1463 return sorted(ctx.p1copies().items())
1461 return sorted(ctx.p1copies().items())
1464
1462
1465 else:
1463 else:
1466 getrenamed = getrenamedfn(repo, endrev)
1464 getrenamed = getrenamedfn(repo, endrev)
1467
1465
1468 def copiesfn(ctx):
1466 def copiesfn(ctx):
1469 copies = []
1467 copies = []
1470 for fn in ctx.files():
1468 for fn in ctx.files():
1471 rename = getrenamed(fn, ctx.rev())
1469 rename = getrenamed(fn, ctx.rev())
1472 if rename:
1470 if rename:
1473 copies.append((fn, rename))
1471 copies.append((fn, rename))
1474 return copies
1472 return copies
1475
1473
1476 return copiesfn
1474 return copiesfn
1477
1475
1478
1476
1479 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1477 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1480 """Update the dirstate to reflect the intent of copying src to dst. For
1478 """Update the dirstate to reflect the intent of copying src to dst. For
1481 different reasons it might not end with dst being marked as copied from src.
1479 different reasons it might not end with dst being marked as copied from src.
1482 """
1480 """
1483 origsrc = repo.dirstate.copied(src) or src
1481 origsrc = repo.dirstate.copied(src) or src
1484 if dst == origsrc: # copying back a copy?
1482 if dst == origsrc: # copying back a copy?
1485 if repo.dirstate[dst] not in b'mn' and not dryrun:
1483 if repo.dirstate[dst] not in b'mn' and not dryrun:
1486 repo.dirstate.normallookup(dst)
1484 repo.dirstate.normallookup(dst)
1487 else:
1485 else:
1488 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1486 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1489 if not ui.quiet:
1487 if not ui.quiet:
1490 ui.warn(
1488 ui.warn(
1491 _(
1489 _(
1492 b"%s has not been committed yet, so no copy "
1490 b"%s has not been committed yet, so no copy "
1493 b"data will be stored for %s.\n"
1491 b"data will be stored for %s.\n"
1494 )
1492 )
1495 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1493 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1496 )
1494 )
1497 if repo.dirstate[dst] in b'?r' and not dryrun:
1495 if repo.dirstate[dst] in b'?r' and not dryrun:
1498 wctx.add([dst])
1496 wctx.add([dst])
1499 elif not dryrun:
1497 elif not dryrun:
1500 wctx.copy(origsrc, dst)
1498 wctx.copy(origsrc, dst)
1501
1499
1502
1500
1503 def movedirstate(repo, newctx, match=None):
1501 def movedirstate(repo, newctx, match=None):
1504 """Move the dirstate to newctx and adjust it as necessary.
1502 """Move the dirstate to newctx and adjust it as necessary.
1505
1503
1506 A matcher can be provided as an optimization. It is probably a bug to pass
1504 A matcher can be provided as an optimization. It is probably a bug to pass
1507 a matcher that doesn't match all the differences between the parent of the
1505 a matcher that doesn't match all the differences between the parent of the
1508 working copy and newctx.
1506 working copy and newctx.
1509 """
1507 """
1510 oldctx = repo[b'.']
1508 oldctx = repo[b'.']
1511 ds = repo.dirstate
1509 ds = repo.dirstate
1512 copies = dict(ds.copies())
1510 copies = dict(ds.copies())
1513 ds.setparents(newctx.node(), nullid)
1511 ds.setparents(newctx.node(), nullid)
1514 s = newctx.status(oldctx, match=match)
1512 s = newctx.status(oldctx, match=match)
1515 for f in s.modified:
1513 for f in s.modified:
1516 if ds[f] == b'r':
1514 if ds[f] == b'r':
1517 # modified + removed -> removed
1515 # modified + removed -> removed
1518 continue
1516 continue
1519 ds.normallookup(f)
1517 ds.normallookup(f)
1520
1518
1521 for f in s.added:
1519 for f in s.added:
1522 if ds[f] == b'r':
1520 if ds[f] == b'r':
1523 # added + removed -> unknown
1521 # added + removed -> unknown
1524 ds.drop(f)
1522 ds.drop(f)
1525 elif ds[f] != b'a':
1523 elif ds[f] != b'a':
1526 ds.add(f)
1524 ds.add(f)
1527
1525
1528 for f in s.removed:
1526 for f in s.removed:
1529 if ds[f] == b'a':
1527 if ds[f] == b'a':
1530 # removed + added -> normal
1528 # removed + added -> normal
1531 ds.normallookup(f)
1529 ds.normallookup(f)
1532 elif ds[f] != b'r':
1530 elif ds[f] != b'r':
1533 ds.remove(f)
1531 ds.remove(f)
1534
1532
1535 # Merge old parent and old working dir copies
1533 # Merge old parent and old working dir copies
1536 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1534 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1537 oldcopies.update(copies)
1535 oldcopies.update(copies)
1538 copies = {
1536 copies = {
1539 dst: oldcopies.get(src, src)
1537 dst: oldcopies.get(src, src)
1540 for dst, src in pycompat.iteritems(oldcopies)
1538 for dst, src in pycompat.iteritems(oldcopies)
1541 }
1539 }
1542 # Adjust the dirstate copies
1540 # Adjust the dirstate copies
1543 for dst, src in pycompat.iteritems(copies):
1541 for dst, src in pycompat.iteritems(copies):
1544 if src not in newctx or dst in newctx or ds[dst] != b'a':
1542 if src not in newctx or dst in newctx or ds[dst] != b'a':
1545 src = None
1543 src = None
1546 ds.copy(src, dst)
1544 ds.copy(src, dst)
1547 repo._quick_access_changeid_invalidate()
1545 repo._quick_access_changeid_invalidate()
1548
1546
1549
1547
1550 def filterrequirements(requirements):
1548 def filterrequirements(requirements):
1551 """ filters the requirements into two sets:
1549 """ filters the requirements into two sets:
1552
1550
1553 wcreq: requirements which should be written in .hg/requires
1551 wcreq: requirements which should be written in .hg/requires
1554 storereq: which should be written in .hg/store/requires
1552 storereq: which should be written in .hg/store/requires
1555
1553
1556 Returns (wcreq, storereq)
1554 Returns (wcreq, storereq)
1557 """
1555 """
1558 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1556 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1559 wc, store = set(), set()
1557 wc, store = set(), set()
1560 for r in requirements:
1558 for r in requirements:
1561 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1559 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1562 wc.add(r)
1560 wc.add(r)
1563 else:
1561 else:
1564 store.add(r)
1562 store.add(r)
1565 return wc, store
1563 return wc, store
1566 return requirements, None
1564 return requirements, None
1567
1565
1568
1566
1569 def istreemanifest(repo):
1567 def istreemanifest(repo):
1570 """ returns whether the repository is using treemanifest or not """
1568 """ returns whether the repository is using treemanifest or not """
1571 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1569 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1572
1570
1573
1571
1574 def writereporequirements(repo, requirements=None):
1572 def writereporequirements(repo, requirements=None):
1575 """ writes requirements for the repo to .hg/requires """
1573 """ writes requirements for the repo to .hg/requires """
1576 if requirements:
1574 if requirements:
1577 repo.requirements = requirements
1575 repo.requirements = requirements
1578 wcreq, storereq = filterrequirements(repo.requirements)
1576 wcreq, storereq = filterrequirements(repo.requirements)
1579 if wcreq is not None:
1577 if wcreq is not None:
1580 writerequires(repo.vfs, wcreq)
1578 writerequires(repo.vfs, wcreq)
1581 if storereq is not None:
1579 if storereq is not None:
1582 writerequires(repo.svfs, storereq)
1580 writerequires(repo.svfs, storereq)
1583
1581
1584
1582
def writerequires(opener, requirements):
    """Atomically write *requirements* to the ``requires`` file of *opener*.

    Entries are written sorted, one per line, via an atomictemp write so
    readers never observe a partially written file.
    """
    payload = b''.join(b"%s\n" % req for req in sorted(requirements))
    with opener(b'requires', b'w', atomictemp=True) as fp:
        fp.write(payload)
1589
1587
1590
1588
class filecachesubentry(object):
    """Tracks the stat state of one file for cache invalidation.

    ``cachestat`` holds the last observed stat data (or None), while
    ``_cacheable`` remembers whether the file's stat data can be trusted
    for caching; None means "not known yet".
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if not stat:
            return
        self.cachestat = filecachesubentry.stat(self.path)
        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()
        else:
            # still unknown: e.g. the file may not exist yet
            self._cacheable = None

    def refresh(self):
        # only re-stat files whose stat data is (possibly) usable
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        # unknown state is optimistically treated as cacheable
        if self._cacheable is None:
            return True
        return self._cacheable

    def changed(self):
        # a file known to be non-cacheable is always considered modified
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # the first successful stat settles whether caching is usable
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """stat *path*, returning None when the file does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            # only a missing file is tolerated; propagate other errors
            if e.errno != errno.ENOENT:
                raise
1645
1643
1646
1644
class filecacheentry(object):
    """A cache entry covering several files.

    The entry is considered changed as soon as any member file changed.
    """

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits exactly like the explicit loop would
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1663
1661
1664
1662
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        # paths of the files backing the cached value (joined per-instance
        # through join() at access time)
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped getter and its name in
        # both native-str (sname) and bytes (name) forms
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # once the value is stored in obj.__dict__, that instance attribute
        # shadows this (non-data) descriptor, so __get__ should not run again
        # until the cached value is delattr()ed
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            # existing entry: recompute only when a backing file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # stash in the instance dict so later lookups bypass the descriptor
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            # stat=False: record paths only, without stat()ing them now
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1749
1747
1750
1748
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec> <value>" or a bare "<revspec>"
            # (in which case the value defaults to the empty string)
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # reap the child (if any) before inspecting its exit status below
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
        if proc and proc.returncode != 0:
            # note: 'cmd' is always bound here, since 'proc' is only set
            # on the shell: branch, which assigns 'cmd' first
            raise error.Abort(
                _(b"extdata command '%s' failed: %s")
                % (cmd, procutil.explainexit(proc.returncode))
            )

    return data
1817
1815
1818
1816
class progress(object):
    """Thin stateful wrapper around a progress-bar update callback.

    Tracks the current position, topic, unit and total, forwarding every
    state change to *updatebar*.  Usable as a context manager: leaving
    the ``with`` block finishes the bar via ``complete()``.  When the
    ``progress.debug`` config knob is set, each update is also echoed to
    ``ui.debug()``.
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        """Move the bar to *pos*, optionally updating *total*."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        """Advance the bar by *step* positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Mark the bar as finished (pos/unit/total are cleared)."""
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, None, b"", b"", None)

    def _printdebug(self, item):
        # mirror the bar state on the debug channel
        unit = b' ' + self.unit if self.unit else b''
        if item:
            item = b' ' + item

        if not self.total:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
            return
        pct = 100.0 * self.pos / self.total
        self.ui.debug(
            b'%s:%s %d/%d%s (%4.2f%%)\n'
            % (self.topic, item, self.pos, self.total, unit, pct)
        )
1868
1866
1869
1867
def gdinitconfig(ui):
    """Tell whether a new repo should be created with general delta."""
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1877
1875
1878
1876
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised for general delta."""
    # experimental config: format.generaldelta
    enabled = ui.configbool(b'format', b'generaldelta')
    return enabled
1884
1882
1885
1883
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    # reserved pseudo-key under which a non-key-value first line is exposed
    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted for API compatibility but unused
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # strip the trailing '\n' so __firstline matches what was written
            d[self.firstlinekey] = lines.pop(0)[:-1]

        try:
            # skip lines that hold only whitespace/newlines; anything else
            # must be a 'key=value' record or dict() raises ValueError
            parsed = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in parsed:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        out = []
        if firstline is not None:
            out.append(b'%s\n' % firstline)

        for key, value in data.items():
            if key == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not key[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not key.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in value:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            out.append(b"%s=%s\n" % (key, value))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(out))
1956
1954
1957
1955
# Transaction-name prefixes for which a summary of obsolescence markers
# and obsoleted changesets is reported when the transaction closes.
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# Transaction-name prefixes for which the range of newly visible
# changesets is reported when the transaction closes.
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1970
1968
1971
1969
def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _quietmatcher(m):
        # None means "everything"; otherwise silence bad-file complaints,
        # since the command itself will report files that don't exist and
        # we don't want to duplicate the message
        if not m:
            return matchall(repo)
        assert isinstance(m, matchmod.basematcher)
        return matchmod.badmatch(m, lambda fn, msg: None)

    prepared = [(rev, _quietmatcher(m)) for (rev, m) in revmatches]

    fileprefetchhooks(repo, prepared)
1995
1993
1996
1994
# a list of (repo, revs, match) prefetch functions
# (extensions register callbacks here; they are invoked by prefetchfiles)
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
2002
2000
2003
2001
2004 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2002 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2005 """register a callback to issue a summary after the transaction is closed
2003 """register a callback to issue a summary after the transaction is closed
2006
2004
2007 If as_validator is true, then the callbacks are registered as transaction
2005 If as_validator is true, then the callbacks are registered as transaction
2008 validators instead
2006 validators instead
2009 """
2007 """
2010
2008
2011 def txmatch(sources):
2009 def txmatch(sources):
2012 return any(txnname.startswith(source) for source in sources)
2010 return any(txnname.startswith(source) for source in sources)
2013
2011
2014 categories = []
2012 categories = []
2015
2013
2016 def reportsummary(func):
2014 def reportsummary(func):
2017 """decorator for report callbacks."""
2015 """decorator for report callbacks."""
2018 # The repoview life cycle is shorter than the one of the actual
2016 # The repoview life cycle is shorter than the one of the actual
2019 # underlying repository. So the filtered object can die before the
2017 # underlying repository. So the filtered object can die before the
2020 # weakref is used leading to troubles. We keep a reference to the
2018 # weakref is used leading to troubles. We keep a reference to the
2021 # unfiltered object and restore the filtering when retrieving the
2019 # unfiltered object and restore the filtering when retrieving the
2022 # repository through the weakref.
2020 # repository through the weakref.
2023 filtername = repo.filtername
2021 filtername = repo.filtername
2024 reporef = weakref.ref(repo.unfiltered())
2022 reporef = weakref.ref(repo.unfiltered())
2025
2023
2026 def wrapped(tr):
2024 def wrapped(tr):
2027 repo = reporef()
2025 repo = reporef()
2028 if filtername:
2026 if filtername:
2029 assert repo is not None # help pytype
2027 assert repo is not None # help pytype
2030 repo = repo.filtered(filtername)
2028 repo = repo.filtered(filtername)
2031 func(repo, tr)
2029 func(repo, tr)
2032
2030
2033 newcat = b'%02i-txnreport' % len(categories)
2031 newcat = b'%02i-txnreport' % len(categories)
2034 if as_validator:
2032 if as_validator:
2035 otr.addvalidator(newcat, wrapped)
2033 otr.addvalidator(newcat, wrapped)
2036 else:
2034 else:
2037 otr.addpostclose(newcat, wrapped)
2035 otr.addpostclose(newcat, wrapped)
2038 categories.append(newcat)
2036 categories.append(newcat)
2039 return wrapped
2037 return wrapped
2040
2038
2041 @reportsummary
2039 @reportsummary
2042 def reportchangegroup(repo, tr):
2040 def reportchangegroup(repo, tr):
2043 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2041 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2044 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2042 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2045 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2043 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2046 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2044 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2047 if cgchangesets or cgrevisions or cgfiles:
2045 if cgchangesets or cgrevisions or cgfiles:
2048 htext = b""
2046 htext = b""
2049 if cgheads:
2047 if cgheads:
2050 htext = _(b" (%+d heads)") % cgheads
2048 htext = _(b" (%+d heads)") % cgheads
2051 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2049 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2052 if as_validator:
2050 if as_validator:
2053 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2051 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2054 assert repo is not None # help pytype
2052 assert repo is not None # help pytype
2055 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2053 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2056
2054
2057 if txmatch(_reportobsoletedsource):
2055 if txmatch(_reportobsoletedsource):
2058
2056
2059 @reportsummary
2057 @reportsummary
2060 def reportobsoleted(repo, tr):
2058 def reportobsoleted(repo, tr):
2061 obsoleted = obsutil.getobsoleted(repo, tr)
2059 obsoleted = obsutil.getobsoleted(repo, tr)
2062 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2060 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2063 if newmarkers:
2061 if newmarkers:
2064 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2062 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2065 if obsoleted:
2063 if obsoleted:
2066 msg = _(b'obsoleted %i changesets\n')
2064 msg = _(b'obsoleted %i changesets\n')
2067 if as_validator:
2065 if as_validator:
2068 msg = _(b'obsoleting %i changesets\n')
2066 msg = _(b'obsoleting %i changesets\n')
2069 repo.ui.status(msg % len(obsoleted))
2067 repo.ui.status(msg % len(obsoleted))
2070
2068
2071 if obsolete.isenabled(
2069 if obsolete.isenabled(
2072 repo, obsolete.createmarkersopt
2070 repo, obsolete.createmarkersopt
2073 ) and repo.ui.configbool(
2071 ) and repo.ui.configbool(
2074 b'experimental', b'evolution.report-instabilities'
2072 b'experimental', b'evolution.report-instabilities'
2075 ):
2073 ):
2076 instabilitytypes = [
2074 instabilitytypes = [
2077 (b'orphan', b'orphan'),
2075 (b'orphan', b'orphan'),
2078 (b'phase-divergent', b'phasedivergent'),
2076 (b'phase-divergent', b'phasedivergent'),
2079 (b'content-divergent', b'contentdivergent'),
2077 (b'content-divergent', b'contentdivergent'),
2080 ]
2078 ]
2081
2079
2082 def getinstabilitycounts(repo):
2080 def getinstabilitycounts(repo):
2083 filtered = repo.changelog.filteredrevs
2081 filtered = repo.changelog.filteredrevs
2084 counts = {}
2082 counts = {}
2085 for instability, revset in instabilitytypes:
2083 for instability, revset in instabilitytypes:
2086 counts[instability] = len(
2084 counts[instability] = len(
2087 set(obsolete.getrevs(repo, revset)) - filtered
2085 set(obsolete.getrevs(repo, revset)) - filtered
2088 )
2086 )
2089 return counts
2087 return counts
2090
2088
2091 oldinstabilitycounts = getinstabilitycounts(repo)
2089 oldinstabilitycounts = getinstabilitycounts(repo)
2092
2090
2093 @reportsummary
2091 @reportsummary
2094 def reportnewinstabilities(repo, tr):
2092 def reportnewinstabilities(repo, tr):
2095 newinstabilitycounts = getinstabilitycounts(repo)
2093 newinstabilitycounts = getinstabilitycounts(repo)
2096 for instability, revset in instabilitytypes:
2094 for instability, revset in instabilitytypes:
2097 delta = (
2095 delta = (
2098 newinstabilitycounts[instability]
2096 newinstabilitycounts[instability]
2099 - oldinstabilitycounts[instability]
2097 - oldinstabilitycounts[instability]
2100 )
2098 )
2101 msg = getinstabilitymessage(delta, instability)
2099 msg = getinstabilitymessage(delta, instability)
2102 if msg:
2100 if msg:
2103 repo.ui.warn(msg)
2101 repo.ui.warn(msg)
2104
2102
2105 if txmatch(_reportnewcssource):
2103 if txmatch(_reportnewcssource):
2106
2104
2107 @reportsummary
2105 @reportsummary
2108 def reportnewcs(repo, tr):
2106 def reportnewcs(repo, tr):
2109 """Report the range of new revisions pulled/unbundled."""
2107 """Report the range of new revisions pulled/unbundled."""
2110 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2108 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2111 unfi = repo.unfiltered()
2109 unfi = repo.unfiltered()
2112 if origrepolen >= len(unfi):
2110 if origrepolen >= len(unfi):
2113 return
2111 return
2114
2112
2115 # Compute the bounds of new visible revisions' range.
2113 # Compute the bounds of new visible revisions' range.
2116 revs = smartset.spanset(repo, start=origrepolen)
2114 revs = smartset.spanset(repo, start=origrepolen)
2117 if revs:
2115 if revs:
2118 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2116 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2119
2117
2120 if minrev == maxrev:
2118 if minrev == maxrev:
2121 revrange = minrev
2119 revrange = minrev
2122 else:
2120 else:
2123 revrange = b'%s:%s' % (minrev, maxrev)
2121 revrange = b'%s:%s' % (minrev, maxrev)
2124 draft = len(repo.revs(b'%ld and draft()', revs))
2122 draft = len(repo.revs(b'%ld and draft()', revs))
2125 secret = len(repo.revs(b'%ld and secret()', revs))
2123 secret = len(repo.revs(b'%ld and secret()', revs))
2126 if not (draft or secret):
2124 if not (draft or secret):
2127 msg = _(b'new changesets %s\n') % revrange
2125 msg = _(b'new changesets %s\n') % revrange
2128 elif draft and secret:
2126 elif draft and secret:
2129 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2127 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2130 msg %= (revrange, draft, secret)
2128 msg %= (revrange, draft, secret)
2131 elif draft:
2129 elif draft:
2132 msg = _(b'new changesets %s (%d drafts)\n')
2130 msg = _(b'new changesets %s (%d drafts)\n')
2133 msg %= (revrange, draft)
2131 msg %= (revrange, draft)
2134 elif secret:
2132 elif secret:
2135 msg = _(b'new changesets %s (%d secrets)\n')
2133 msg = _(b'new changesets %s (%d secrets)\n')
2136 msg %= (revrange, secret)
2134 msg %= (revrange, secret)
2137 else:
2135 else:
2138 errormsg = b'entered unreachable condition'
2136 errormsg = b'entered unreachable condition'
2139 raise error.ProgrammingError(errormsg)
2137 raise error.ProgrammingError(errormsg)
2140 repo.ui.status(msg)
2138 repo.ui.status(msg)
2141
2139
2142 # search new changesets directly pulled as obsolete
2140 # search new changesets directly pulled as obsolete
2143 duplicates = tr.changes.get(b'revduplicates', ())
2141 duplicates = tr.changes.get(b'revduplicates', ())
2144 obsadded = unfi.revs(
2142 obsadded = unfi.revs(
2145 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2143 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2146 )
2144 )
2147 cl = repo.changelog
2145 cl = repo.changelog
2148 extinctadded = [r for r in obsadded if r not in cl]
2146 extinctadded = [r for r in obsadded if r not in cl]
2149 if extinctadded:
2147 if extinctadded:
2150 # They are not just obsolete, but obsolete and invisible
2148 # They are not just obsolete, but obsolete and invisible
2151 # we call them "extinct" internally but the terms have not been
2149 # we call them "extinct" internally but the terms have not been
2152 # exposed to users.
2150 # exposed to users.
2153 msg = b'(%d other changesets obsolete on arrival)\n'
2151 msg = b'(%d other changesets obsolete on arrival)\n'
2154 repo.ui.status(msg % len(extinctadded))
2152 repo.ui.status(msg % len(extinctadded))
2155
2153
2156 @reportsummary
2154 @reportsummary
2157 def reportphasechanges(repo, tr):
2155 def reportphasechanges(repo, tr):
2158 """Report statistics of phase changes for changesets pre-existing
2156 """Report statistics of phase changes for changesets pre-existing
2159 pull/unbundle.
2157 pull/unbundle.
2160 """
2158 """
2161 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2159 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2162 published = []
2160 published = []
2163 for revs, (old, new) in tr.changes.get(b'phases', []):
2161 for revs, (old, new) in tr.changes.get(b'phases', []):
2164 if new != phases.public:
2162 if new != phases.public:
2165 continue
2163 continue
2166 published.extend(rev for rev in revs if rev < origrepolen)
2164 published.extend(rev for rev in revs if rev < origrepolen)
2167 if not published:
2165 if not published:
2168 return
2166 return
2169 msg = _(b'%d local changesets published\n')
2167 msg = _(b'%d local changesets published\n')
2170 if as_validator:
2168 if as_validator:
2171 msg = _(b'%d local changesets will be published\n')
2169 msg = _(b'%d local changesets will be published\n')
2172 repo.ui.status(msg % len(published))
2170 repo.ui.status(msg % len(published))
2173
2171
2174
2172
2175 def getinstabilitymessage(delta, instability):
2173 def getinstabilitymessage(delta, instability):
2176 """function to return the message to show warning about new instabilities
2174 """function to return the message to show warning about new instabilities
2177
2175
2178 exists as a separate function so that extension can wrap to show more
2176 exists as a separate function so that extension can wrap to show more
2179 information like how to fix instabilities"""
2177 information like how to fix instabilities"""
2180 if delta > 0:
2178 if delta > 0:
2181 return _(b'%i new %s changesets\n') % (delta, instability)
2179 return _(b'%i new %s changesets\n') % (delta, instability)
2182
2180
2183
2181
2184 def nodesummaries(repo, nodes, maxnumnodes=4):
2182 def nodesummaries(repo, nodes, maxnumnodes=4):
2185 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2183 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2186 return b' '.join(short(h) for h in nodes)
2184 return b' '.join(short(h) for h in nodes)
2187 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2185 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2188 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2186 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2189
2187
2190
2188
2191 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2189 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2192 """check that no named branch has multiple heads"""
2190 """check that no named branch has multiple heads"""
2193 if desc in (b'strip', b'repair'):
2191 if desc in (b'strip', b'repair'):
2194 # skip the logic during strip
2192 # skip the logic during strip
2195 return
2193 return
2196 visible = repo.filtered(b'visible')
2194 visible = repo.filtered(b'visible')
2197 # possible improvement: we could restrict the check to affected branch
2195 # possible improvement: we could restrict the check to affected branch
2198 bm = visible.branchmap()
2196 bm = visible.branchmap()
2199 for name in bm:
2197 for name in bm:
2200 heads = bm.branchheads(name, closed=accountclosed)
2198 heads = bm.branchheads(name, closed=accountclosed)
2201 if len(heads) > 1:
2199 if len(heads) > 1:
2202 msg = _(b'rejecting multiple heads on branch "%s"')
2200 msg = _(b'rejecting multiple heads on branch "%s"')
2203 msg %= name
2201 msg %= name
2204 hint = _(b'%d heads: %s')
2202 hint = _(b'%d heads: %s')
2205 hint %= (len(heads), nodesummaries(repo, heads))
2203 hint %= (len(heads), nodesummaries(repo, heads))
2206 raise error.Abort(msg, hint=hint)
2204 raise error.Abort(msg, hint=hint)
2207
2205
2208
2206
2209 def wrapconvertsink(sink):
2207 def wrapconvertsink(sink):
2210 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2208 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2211 before it is used, whether or not the convert extension was formally loaded.
2209 before it is used, whether or not the convert extension was formally loaded.
2212 """
2210 """
2213 return sink
2211 return sink
2214
2212
2215
2213
2216 def unhidehashlikerevs(repo, specs, hiddentype):
2214 def unhidehashlikerevs(repo, specs, hiddentype):
2217 """parse the user specs and unhide changesets whose hash or revision number
2215 """parse the user specs and unhide changesets whose hash or revision number
2218 is passed.
2216 is passed.
2219
2217
2220 hiddentype can be: 1) 'warn': warn while unhiding changesets
2218 hiddentype can be: 1) 'warn': warn while unhiding changesets
2221 2) 'nowarn': don't warn while unhiding changesets
2219 2) 'nowarn': don't warn while unhiding changesets
2222
2220
2223 returns a repo object with the required changesets unhidden
2221 returns a repo object with the required changesets unhidden
2224 """
2222 """
2225 if not repo.filtername or not repo.ui.configbool(
2223 if not repo.filtername or not repo.ui.configbool(
2226 b'experimental', b'directaccess'
2224 b'experimental', b'directaccess'
2227 ):
2225 ):
2228 return repo
2226 return repo
2229
2227
2230 if repo.filtername not in (b'visible', b'visible-hidden'):
2228 if repo.filtername not in (b'visible', b'visible-hidden'):
2231 return repo
2229 return repo
2232
2230
2233 symbols = set()
2231 symbols = set()
2234 for spec in specs:
2232 for spec in specs:
2235 try:
2233 try:
2236 tree = revsetlang.parse(spec)
2234 tree = revsetlang.parse(spec)
2237 except error.ParseError: # will be reported by scmutil.revrange()
2235 except error.ParseError: # will be reported by scmutil.revrange()
2238 continue
2236 continue
2239
2237
2240 symbols.update(revsetlang.gethashlikesymbols(tree))
2238 symbols.update(revsetlang.gethashlikesymbols(tree))
2241
2239
2242 if not symbols:
2240 if not symbols:
2243 return repo
2241 return repo
2244
2242
2245 revs = _getrevsfromsymbols(repo, symbols)
2243 revs = _getrevsfromsymbols(repo, symbols)
2246
2244
2247 if not revs:
2245 if not revs:
2248 return repo
2246 return repo
2249
2247
2250 if hiddentype == b'warn':
2248 if hiddentype == b'warn':
2251 unfi = repo.unfiltered()
2249 unfi = repo.unfiltered()
2252 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2250 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2253 repo.ui.warn(
2251 repo.ui.warn(
2254 _(
2252 _(
2255 b"warning: accessing hidden changesets for write "
2253 b"warning: accessing hidden changesets for write "
2256 b"operation: %s\n"
2254 b"operation: %s\n"
2257 )
2255 )
2258 % revstr
2256 % revstr
2259 )
2257 )
2260
2258
2261 # we have to use new filtername to separate branch/tags cache until we can
2259 # we have to use new filtername to separate branch/tags cache until we can
2262 # disbale these cache when revisions are dynamically pinned.
2260 # disbale these cache when revisions are dynamically pinned.
2263 return repo.filtered(b'visible-hidden', revs)
2261 return repo.filtered(b'visible-hidden', revs)
2264
2262
2265
2263
2266 def _getrevsfromsymbols(repo, symbols):
2264 def _getrevsfromsymbols(repo, symbols):
2267 """parse the list of symbols and returns a set of revision numbers of hidden
2265 """parse the list of symbols and returns a set of revision numbers of hidden
2268 changesets present in symbols"""
2266 changesets present in symbols"""
2269 revs = set()
2267 revs = set()
2270 unfi = repo.unfiltered()
2268 unfi = repo.unfiltered()
2271 unficl = unfi.changelog
2269 unficl = unfi.changelog
2272 cl = repo.changelog
2270 cl = repo.changelog
2273 tiprev = len(unficl)
2271 tiprev = len(unficl)
2274 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2272 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2275 for s in symbols:
2273 for s in symbols:
2276 try:
2274 try:
2277 n = int(s)
2275 n = int(s)
2278 if n <= tiprev:
2276 if n <= tiprev:
2279 if not allowrevnums:
2277 if not allowrevnums:
2280 continue
2278 continue
2281 else:
2279 else:
2282 if n not in cl:
2280 if n not in cl:
2283 revs.add(n)
2281 revs.add(n)
2284 continue
2282 continue
2285 except ValueError:
2283 except ValueError:
2286 pass
2284 pass
2287
2285
2288 try:
2286 try:
2289 s = resolvehexnodeidprefix(unfi, s)
2287 s = resolvehexnodeidprefix(unfi, s)
2290 except (error.LookupError, error.WdirUnsupported):
2288 except (error.LookupError, error.WdirUnsupported):
2291 s = None
2289 s = None
2292
2290
2293 if s is not None:
2291 if s is not None:
2294 rev = unficl.rev(s)
2292 rev = unficl.rev(s)
2295 if rev not in cl:
2293 if rev not in cl:
2296 revs.add(rev)
2294 revs.add(rev)
2297
2295
2298 return revs
2296 return revs
2299
2297
2300
2298
2301 def bookmarkrevs(repo, mark):
2299 def bookmarkrevs(repo, mark):
2302 """
2300 """
2303 Select revisions reachable by a given bookmark
2301 Select revisions reachable by a given bookmark
2304 """
2302 """
2305 return repo.revs(
2303 return repo.revs(
2306 b"ancestors(bookmark(%s)) - "
2304 b"ancestors(bookmark(%s)) - "
2307 b"ancestors(head() and not bookmark(%s)) - "
2305 b"ancestors(head() and not bookmark(%s)) - "
2308 b"ancestors(bookmark() and not bookmark(%s))",
2306 b"ancestors(bookmark() and not bookmark(%s))",
2309 mark,
2307 mark,
2310 mark,
2308 mark,
2311 mark,
2309 mark,
2312 )
2310 )
@@ -1,749 +1,747 b''
1 # wireprotov1server.py - Wire protocol version 1 server functionality
1 # wireprotov1server.py - Wire protocol version 1 server functionality
2 #
2 #
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import binascii
10 import binascii
11 import os
11 import os
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullid,
16 nullid,
17 )
17 )
18 from .pycompat import getattr
18 from .pycompat import getattr
19
19
20 from . import (
20 from . import (
21 bundle2,
21 bundle2,
22 bundlecaches,
22 bundlecaches,
23 changegroup as changegroupmod,
23 changegroup as changegroupmod,
24 discovery,
24 discovery,
25 encoding,
25 encoding,
26 error,
26 error,
27 exchange,
27 exchange,
28 pushkey as pushkeymod,
28 pushkey as pushkeymod,
29 pycompat,
29 pycompat,
30 streamclone,
30 streamclone,
31 util,
31 util,
32 wireprototypes,
32 wireprototypes,
33 )
33 )
34
34
35 from .utils import (
35 from .utils import (
36 procutil,
36 procutil,
37 stringutil,
37 stringutil,
38 )
38 )
39
39
40 urlerr = util.urlerr
40 urlerr = util.urlerr
41 urlreq = util.urlreq
41 urlreq = util.urlreq
42
42
43 bundle2requiredmain = _(b'incompatible Mercurial client; bundle2 required')
43 bundle2requiredmain = _(b'incompatible Mercurial client; bundle2 required')
44 bundle2requiredhint = _(
44 bundle2requiredhint = _(
45 b'see https://www.mercurial-scm.org/wiki/IncompatibleClient'
45 b'see https://www.mercurial-scm.org/wiki/IncompatibleClient'
46 )
46 )
47 bundle2required = b'%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
47 bundle2required = b'%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
48
48
49
49
50 def clientcompressionsupport(proto):
50 def clientcompressionsupport(proto):
51 """Returns a list of compression methods supported by the client.
51 """Returns a list of compression methods supported by the client.
52
52
53 Returns a list of the compression methods supported by the client
53 Returns a list of the compression methods supported by the client
54 according to the protocol capabilities. If no such capability has
54 according to the protocol capabilities. If no such capability has
55 been announced, fallback to the default of zlib and uncompressed.
55 been announced, fallback to the default of zlib and uncompressed.
56 """
56 """
57 for cap in proto.getprotocaps():
57 for cap in proto.getprotocaps():
58 if cap.startswith(b'comp='):
58 if cap.startswith(b'comp='):
59 return cap[5:].split(b',')
59 return cap[5:].split(b',')
60 return [b'zlib', b'none']
60 return [b'zlib', b'none']
61
61
62
62
63 # wire protocol command can either return a string or one of these classes.
63 # wire protocol command can either return a string or one of these classes.
64
64
65
65
66 def getdispatchrepo(repo, proto, command):
66 def getdispatchrepo(repo, proto, command):
67 """Obtain the repo used for processing wire protocol commands.
67 """Obtain the repo used for processing wire protocol commands.
68
68
69 The intent of this function is to serve as a monkeypatch point for
69 The intent of this function is to serve as a monkeypatch point for
70 extensions that need commands to operate on different repo views under
70 extensions that need commands to operate on different repo views under
71 specialized circumstances.
71 specialized circumstances.
72 """
72 """
73 viewconfig = repo.ui.config(b'server', b'view')
73 viewconfig = repo.ui.config(b'server', b'view')
74 return repo.filtered(viewconfig)
74 return repo.filtered(viewconfig)
75
75
76
76
77 def dispatch(repo, proto, command):
77 def dispatch(repo, proto, command):
78 repo = getdispatchrepo(repo, proto, command)
78 repo = getdispatchrepo(repo, proto, command)
79
79
80 func, spec = commands[command]
80 func, spec = commands[command]
81 args = proto.getargs(spec)
81 args = proto.getargs(spec)
82
82
83 return func(repo, proto, *args)
83 return func(repo, proto, *args)
84
84
85
85
86 def options(cmd, keys, others):
86 def options(cmd, keys, others):
87 opts = {}
87 opts = {}
88 for k in keys:
88 for k in keys:
89 if k in others:
89 if k in others:
90 opts[k] = others[k]
90 opts[k] = others[k]
91 del others[k]
91 del others[k]
92 if others:
92 if others:
93 procutil.stderr.write(
93 procutil.stderr.write(
94 b"warning: %s ignored unexpected arguments %s\n"
94 b"warning: %s ignored unexpected arguments %s\n"
95 % (cmd, b",".join(others))
95 % (cmd, b",".join(others))
96 )
96 )
97 return opts
97 return opts
98
98
99
99
100 def bundle1allowed(repo, action):
100 def bundle1allowed(repo, action):
101 """Whether a bundle1 operation is allowed from the server.
101 """Whether a bundle1 operation is allowed from the server.
102
102
103 Priority is:
103 Priority is:
104
104
105 1. server.bundle1gd.<action> (if generaldelta active)
105 1. server.bundle1gd.<action> (if generaldelta active)
106 2. server.bundle1.<action>
106 2. server.bundle1.<action>
107 3. server.bundle1gd (if generaldelta active)
107 3. server.bundle1gd (if generaldelta active)
108 4. server.bundle1
108 4. server.bundle1
109 """
109 """
110 ui = repo.ui
110 ui = repo.ui
111 gd = b'generaldelta' in repo.requirements
111 gd = b'generaldelta' in repo.requirements
112
112
113 if gd:
113 if gd:
114 v = ui.configbool(b'server', b'bundle1gd.%s' % action)
114 v = ui.configbool(b'server', b'bundle1gd.%s' % action)
115 if v is not None:
115 if v is not None:
116 return v
116 return v
117
117
118 v = ui.configbool(b'server', b'bundle1.%s' % action)
118 v = ui.configbool(b'server', b'bundle1.%s' % action)
119 if v is not None:
119 if v is not None:
120 return v
120 return v
121
121
122 if gd:
122 if gd:
123 v = ui.configbool(b'server', b'bundle1gd')
123 v = ui.configbool(b'server', b'bundle1gd')
124 if v is not None:
124 if v is not None:
125 return v
125 return v
126
126
127 return ui.configbool(b'server', b'bundle1')
127 return ui.configbool(b'server', b'bundle1')
128
128
129
129
130 commands = wireprototypes.commanddict()
130 commands = wireprototypes.commanddict()
131
131
132
132
133 def wireprotocommand(name, args=None, permission=b'push'):
133 def wireprotocommand(name, args=None, permission=b'push'):
134 """Decorator to declare a wire protocol command.
134 """Decorator to declare a wire protocol command.
135
135
136 ``name`` is the name of the wire protocol command being provided.
136 ``name`` is the name of the wire protocol command being provided.
137
137
138 ``args`` defines the named arguments accepted by the command. It is
138 ``args`` defines the named arguments accepted by the command. It is
139 a space-delimited list of argument names. ``*`` denotes a special value
139 a space-delimited list of argument names. ``*`` denotes a special value
140 that says to accept all named arguments.
140 that says to accept all named arguments.
141
141
142 ``permission`` defines the permission type needed to run this command.
142 ``permission`` defines the permission type needed to run this command.
143 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
143 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
144 respectively. Default is to assume command requires ``push`` permissions
144 respectively. Default is to assume command requires ``push`` permissions
145 because otherwise commands not declaring their permissions could modify
145 because otherwise commands not declaring their permissions could modify
146 a repository that is supposed to be read-only.
146 a repository that is supposed to be read-only.
147 """
147 """
148 transports = {
148 transports = {
149 k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1
149 k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1
150 }
150 }
151
151
152 # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to
152 # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to
153 # SSHv2.
153 # SSHv2.
154 # TODO undo this hack when SSH is using the unified frame protocol.
154 # TODO undo this hack when SSH is using the unified frame protocol.
155 if name == b'batch':
155 if name == b'batch':
156 transports.add(wireprototypes.SSHV2)
156 transports.add(wireprototypes.SSHV2)
157
157
158 if permission not in (b'push', b'pull'):
158 if permission not in (b'push', b'pull'):
159 raise error.ProgrammingError(
159 raise error.ProgrammingError(
160 b'invalid wire protocol permission; '
160 b'invalid wire protocol permission; '
161 b'got %s; expected "push" or "pull"' % permission
161 b'got %s; expected "push" or "pull"' % permission
162 )
162 )
163
163
164 if args is None:
164 if args is None:
165 args = b''
165 args = b''
166
166
167 if not isinstance(args, bytes):
167 if not isinstance(args, bytes):
168 raise error.ProgrammingError(
168 raise error.ProgrammingError(
169 b'arguments for version 1 commands must be declared as bytes'
169 b'arguments for version 1 commands must be declared as bytes'
170 )
170 )
171
171
172 def register(func):
172 def register(func):
173 if name in commands:
173 if name in commands:
174 raise error.ProgrammingError(
174 raise error.ProgrammingError(
175 b'%s command already registered for version 1' % name
175 b'%s command already registered for version 1' % name
176 )
176 )
177 commands[name] = wireprototypes.commandentry(
177 commands[name] = wireprototypes.commandentry(
178 func, args=args, transports=transports, permission=permission
178 func, args=args, transports=transports, permission=permission
179 )
179 )
180
180
181 return func
181 return func
182
182
183 return register
183 return register
184
184
185
185
186 # TODO define a more appropriate permissions type to use for this.
186 # TODO define a more appropriate permissions type to use for this.
187 @wireprotocommand(b'batch', b'cmds *', permission=b'pull')
187 @wireprotocommand(b'batch', b'cmds *', permission=b'pull')
188 def batch(repo, proto, cmds, others):
188 def batch(repo, proto, cmds, others):
189 unescapearg = wireprototypes.unescapebatcharg
189 unescapearg = wireprototypes.unescapebatcharg
190 res = []
190 res = []
191 for pair in cmds.split(b';'):
191 for pair in cmds.split(b';'):
192 op, args = pair.split(b' ', 1)
192 op, args = pair.split(b' ', 1)
193 vals = {}
193 vals = {}
194 for a in args.split(b','):
194 for a in args.split(b','):
195 if a:
195 if a:
196 n, v = a.split(b'=')
196 n, v = a.split(b'=')
197 vals[unescapearg(n)] = unescapearg(v)
197 vals[unescapearg(n)] = unescapearg(v)
198 func, spec = commands[op]
198 func, spec = commands[op]
199
199
200 # Validate that client has permissions to perform this command.
200 # Validate that client has permissions to perform this command.
201 perm = commands[op].permission
201 perm = commands[op].permission
202 assert perm in (b'push', b'pull')
202 assert perm in (b'push', b'pull')
203 proto.checkperm(perm)
203 proto.checkperm(perm)
204
204
205 if spec:
205 if spec:
206 keys = spec.split()
206 keys = spec.split()
207 data = {}
207 data = {}
208 for k in keys:
208 for k in keys:
209 if k == b'*':
209 if k == b'*':
210 star = {}
210 star = {}
211 for key in vals.keys():
211 for key in vals.keys():
212 if key not in keys:
212 if key not in keys:
213 star[key] = vals[key]
213 star[key] = vals[key]
214 data[b'*'] = star
214 data[b'*'] = star
215 else:
215 else:
216 data[k] = vals[k]
216 data[k] = vals[k]
217 result = func(repo, proto, *[data[k] for k in keys])
217 result = func(repo, proto, *[data[k] for k in keys])
218 else:
218 else:
219 result = func(repo, proto)
219 result = func(repo, proto)
220 if isinstance(result, wireprototypes.ooberror):
220 if isinstance(result, wireprototypes.ooberror):
221 return result
221 return result
222
222
223 # For now, all batchable commands must return bytesresponse or
223 # For now, all batchable commands must return bytesresponse or
224 # raw bytes (for backwards compatibility).
224 # raw bytes (for backwards compatibility).
225 assert isinstance(result, (wireprototypes.bytesresponse, bytes))
225 assert isinstance(result, (wireprototypes.bytesresponse, bytes))
226 if isinstance(result, wireprototypes.bytesresponse):
226 if isinstance(result, wireprototypes.bytesresponse):
227 result = result.data
227 result = result.data
228 res.append(wireprototypes.escapebatcharg(result))
228 res.append(wireprototypes.escapebatcharg(result))
229
229
230 return wireprototypes.bytesresponse(b';'.join(res))
230 return wireprototypes.bytesresponse(b';'.join(res))
231
231
232
232
233 @wireprotocommand(b'between', b'pairs', permission=b'pull')
233 @wireprotocommand(b'between', b'pairs', permission=b'pull')
234 def between(repo, proto, pairs):
234 def between(repo, proto, pairs):
235 pairs = [wireprototypes.decodelist(p, b'-') for p in pairs.split(b" ")]
235 pairs = [wireprototypes.decodelist(p, b'-') for p in pairs.split(b" ")]
236 r = []
236 r = []
237 for b in repo.between(pairs):
237 for b in repo.between(pairs):
238 r.append(wireprototypes.encodelist(b) + b"\n")
238 r.append(wireprototypes.encodelist(b) + b"\n")
239
239
240 return wireprototypes.bytesresponse(b''.join(r))
240 return wireprototypes.bytesresponse(b''.join(r))
241
241
242
242
243 @wireprotocommand(b'branchmap', permission=b'pull')
243 @wireprotocommand(b'branchmap', permission=b'pull')
244 def branchmap(repo, proto):
244 def branchmap(repo, proto):
245 branchmap = repo.branchmap()
245 branchmap = repo.branchmap()
246 heads = []
246 heads = []
247 for branch, nodes in pycompat.iteritems(branchmap):
247 for branch, nodes in pycompat.iteritems(branchmap):
248 branchname = urlreq.quote(encoding.fromlocal(branch))
248 branchname = urlreq.quote(encoding.fromlocal(branch))
249 branchnodes = wireprototypes.encodelist(nodes)
249 branchnodes = wireprototypes.encodelist(nodes)
250 heads.append(b'%s %s' % (branchname, branchnodes))
250 heads.append(b'%s %s' % (branchname, branchnodes))
251
251
252 return wireprototypes.bytesresponse(b'\n'.join(heads))
252 return wireprototypes.bytesresponse(b'\n'.join(heads))
253
253
254
254
255 @wireprotocommand(b'branches', b'nodes', permission=b'pull')
255 @wireprotocommand(b'branches', b'nodes', permission=b'pull')
256 def branches(repo, proto, nodes):
256 def branches(repo, proto, nodes):
257 nodes = wireprototypes.decodelist(nodes)
257 nodes = wireprototypes.decodelist(nodes)
258 r = []
258 r = []
259 for b in repo.branches(nodes):
259 for b in repo.branches(nodes):
260 r.append(wireprototypes.encodelist(b) + b"\n")
260 r.append(wireprototypes.encodelist(b) + b"\n")
261
261
262 return wireprototypes.bytesresponse(b''.join(r))
262 return wireprototypes.bytesresponse(b''.join(r))
263
263
264
264
265 @wireprotocommand(b'clonebundles', b'', permission=b'pull')
265 @wireprotocommand(b'clonebundles', b'', permission=b'pull')
266 def clonebundles(repo, proto):
266 def clonebundles(repo, proto):
267 """Server command for returning info for available bundles to seed clones.
267 """Server command for returning info for available bundles to seed clones.
268
268
269 Clients will parse this response and determine what bundle to fetch.
269 Clients will parse this response and determine what bundle to fetch.
270
270
271 Extensions may wrap this command to filter or dynamically emit data
271 Extensions may wrap this command to filter or dynamically emit data
272 depending on the request. e.g. you could advertise URLs for the closest
272 depending on the request. e.g. you could advertise URLs for the closest
273 data center given the client's IP address.
273 data center given the client's IP address.
274 """
274 """
275 return wireprototypes.bytesresponse(
275 return wireprototypes.bytesresponse(
276 repo.vfs.tryread(bundlecaches.CB_MANIFEST_FILE)
276 repo.vfs.tryread(bundlecaches.CB_MANIFEST_FILE)
277 )
277 )
278
278
279
279
280 wireprotocaps = [
280 wireprotocaps = [
281 b'lookup',
281 b'lookup',
282 b'branchmap',
282 b'branchmap',
283 b'pushkey',
283 b'pushkey',
284 b'known',
284 b'known',
285 b'getbundle',
285 b'getbundle',
286 b'unbundlehash',
286 b'unbundlehash',
287 ]
287 ]
288
288
289
289
290 def _capabilities(repo, proto):
290 def _capabilities(repo, proto):
291 """return a list of capabilities for a repo
291 """return a list of capabilities for a repo
292
292
293 This function exists to allow extensions to easily wrap capabilities
293 This function exists to allow extensions to easily wrap capabilities
294 computation
294 computation
295
295
296 - returns a lists: easy to alter
296 - returns a lists: easy to alter
297 - change done here will be propagated to both `capabilities` and `hello`
297 - change done here will be propagated to both `capabilities` and `hello`
298 command without any other action needed.
298 command without any other action needed.
299 """
299 """
300 # copy to prevent modification of the global list
300 # copy to prevent modification of the global list
301 caps = list(wireprotocaps)
301 caps = list(wireprotocaps)
302
302
303 # Command of same name as capability isn't exposed to version 1 of
303 # Command of same name as capability isn't exposed to version 1 of
304 # transports. So conditionally add it.
304 # transports. So conditionally add it.
305 if commands.commandavailable(b'changegroupsubset', proto):
305 if commands.commandavailable(b'changegroupsubset', proto):
306 caps.append(b'changegroupsubset')
306 caps.append(b'changegroupsubset')
307
307
308 if streamclone.allowservergeneration(repo):
308 if streamclone.allowservergeneration(repo):
309 if repo.ui.configbool(b'server', b'preferuncompressed'):
309 if repo.ui.configbool(b'server', b'preferuncompressed'):
310 caps.append(b'stream-preferred')
310 caps.append(b'stream-preferred')
311 requiredformats = repo.requirements & repo.supportedformats
311 requiredformats = repo.requirements & repo.supportedformats
312 # if our local revlogs are just revlogv1, add 'stream' cap
312 # if our local revlogs are just revlogv1, add 'stream' cap
313 if not requiredformats - {b'revlogv1'}:
313 if not requiredformats - {b'revlogv1'}:
314 caps.append(b'stream')
314 caps.append(b'stream')
315 # otherwise, add 'streamreqs' detailing our local revlog format
315 # otherwise, add 'streamreqs' detailing our local revlog format
316 else:
316 else:
317 caps.append(b'streamreqs=%s' % b','.join(sorted(requiredformats)))
317 caps.append(b'streamreqs=%s' % b','.join(sorted(requiredformats)))
318 if repo.ui.configbool(b'experimental', b'bundle2-advertise'):
318 if repo.ui.configbool(b'experimental', b'bundle2-advertise'):
319 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=b'server'))
319 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=b'server'))
320 caps.append(b'bundle2=' + urlreq.quote(capsblob))
320 caps.append(b'bundle2=' + urlreq.quote(capsblob))
321 caps.append(b'unbundle=%s' % b','.join(bundle2.bundlepriority))
321 caps.append(b'unbundle=%s' % b','.join(bundle2.bundlepriority))
322
322
323 if repo.ui.configbool(b'experimental', b'narrow'):
323 if repo.ui.configbool(b'experimental', b'narrow'):
324 caps.append(wireprototypes.NARROWCAP)
324 caps.append(wireprototypes.NARROWCAP)
325 if repo.ui.configbool(b'experimental', b'narrowservebrokenellipses'):
325 if repo.ui.configbool(b'experimental', b'narrowservebrokenellipses'):
326 caps.append(wireprototypes.ELLIPSESCAP)
326 caps.append(wireprototypes.ELLIPSESCAP)
327
327
328 return proto.addcapabilities(repo, caps)
328 return proto.addcapabilities(repo, caps)
329
329
330
330
331 # If you are writing an extension and consider wrapping this function. Wrap
331 # If you are writing an extension and consider wrapping this function. Wrap
332 # `_capabilities` instead.
332 # `_capabilities` instead.
333 @wireprotocommand(b'capabilities', permission=b'pull')
333 @wireprotocommand(b'capabilities', permission=b'pull')
334 def capabilities(repo, proto):
334 def capabilities(repo, proto):
335 caps = _capabilities(repo, proto)
335 caps = _capabilities(repo, proto)
336 return wireprototypes.bytesresponse(b' '.join(sorted(caps)))
336 return wireprototypes.bytesresponse(b' '.join(sorted(caps)))
337
337
338
338
339 @wireprotocommand(b'changegroup', b'roots', permission=b'pull')
339 @wireprotocommand(b'changegroup', b'roots', permission=b'pull')
340 def changegroup(repo, proto, roots):
340 def changegroup(repo, proto, roots):
341 nodes = wireprototypes.decodelist(roots)
341 nodes = wireprototypes.decodelist(roots)
342 outgoing = discovery.outgoing(
342 outgoing = discovery.outgoing(
343 repo, missingroots=nodes, ancestorsof=repo.heads()
343 repo, missingroots=nodes, ancestorsof=repo.heads()
344 )
344 )
345 cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
345 cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
346 gen = iter(lambda: cg.read(32768), b'')
346 gen = iter(lambda: cg.read(32768), b'')
347 return wireprototypes.streamres(gen=gen)
347 return wireprototypes.streamres(gen=gen)
348
348
349
349
350 @wireprotocommand(b'changegroupsubset', b'bases heads', permission=b'pull')
350 @wireprotocommand(b'changegroupsubset', b'bases heads', permission=b'pull')
351 def changegroupsubset(repo, proto, bases, heads):
351 def changegroupsubset(repo, proto, bases, heads):
352 bases = wireprototypes.decodelist(bases)
352 bases = wireprototypes.decodelist(bases)
353 heads = wireprototypes.decodelist(heads)
353 heads = wireprototypes.decodelist(heads)
354 outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
354 outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
355 cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
355 cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
356 gen = iter(lambda: cg.read(32768), b'')
356 gen = iter(lambda: cg.read(32768), b'')
357 return wireprototypes.streamres(gen=gen)
357 return wireprototypes.streamres(gen=gen)
358
358
359
359
360 @wireprotocommand(b'debugwireargs', b'one two *', permission=b'pull')
360 @wireprotocommand(b'debugwireargs', b'one two *', permission=b'pull')
361 def debugwireargs(repo, proto, one, two, others):
361 def debugwireargs(repo, proto, one, two, others):
362 # only accept optional args from the known set
362 # only accept optional args from the known set
363 opts = options(b'debugwireargs', [b'three', b'four'], others)
363 opts = options(b'debugwireargs', [b'three', b'four'], others)
364 return wireprototypes.bytesresponse(
364 return wireprototypes.bytesresponse(
365 repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
365 repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
366 )
366 )
367
367
368
368
369 def find_pullbundle(repo, proto, opts, clheads, heads, common):
369 def find_pullbundle(repo, proto, opts, clheads, heads, common):
370 """Return a file object for the first matching pullbundle.
370 """Return a file object for the first matching pullbundle.
371
371
372 Pullbundles are specified in .hg/pullbundles.manifest similar to
372 Pullbundles are specified in .hg/pullbundles.manifest similar to
373 clonebundles.
373 clonebundles.
374 For each entry, the bundle specification is checked for compatibility:
374 For each entry, the bundle specification is checked for compatibility:
375 - Client features vs the BUNDLESPEC.
375 - Client features vs the BUNDLESPEC.
376 - Revisions shared with the clients vs base revisions of the bundle.
376 - Revisions shared with the clients vs base revisions of the bundle.
377 A bundle can be applied only if all its base revisions are known by
377 A bundle can be applied only if all its base revisions are known by
378 the client.
378 the client.
379 - At least one leaf of the bundle's DAG is missing on the client.
379 - At least one leaf of the bundle's DAG is missing on the client.
380 - Every leaf of the bundle's DAG is part of node set the client wants.
380 - Every leaf of the bundle's DAG is part of node set the client wants.
381 E.g. do not send a bundle of all changes if the client wants only
381 E.g. do not send a bundle of all changes if the client wants only
382 one specific branch of many.
382 one specific branch of many.
383 """
383 """
384
384
385 def decodehexstring(s):
385 def decodehexstring(s):
386 return {binascii.unhexlify(h) for h in s.split(b';')}
386 return {binascii.unhexlify(h) for h in s.split(b';')}
387
387
388 manifest = repo.vfs.tryread(b'pullbundles.manifest')
388 manifest = repo.vfs.tryread(b'pullbundles.manifest')
389 if not manifest:
389 if not manifest:
390 return None
390 return None
391 res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
391 res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
392 res = bundlecaches.filterclonebundleentries(repo, res)
392 res = bundlecaches.filterclonebundleentries(repo, res)
393 if not res:
393 if not res:
394 return None
394 return None
395 cl = repo.unfiltered().changelog
395 cl = repo.unfiltered().changelog
396 heads_anc = cl.ancestors([cl.rev(rev) for rev in heads], inclusive=True)
396 heads_anc = cl.ancestors([cl.rev(rev) for rev in heads], inclusive=True)
397 common_anc = cl.ancestors([cl.rev(rev) for rev in common], inclusive=True)
397 common_anc = cl.ancestors([cl.rev(rev) for rev in common], inclusive=True)
398 compformats = clientcompressionsupport(proto)
398 compformats = clientcompressionsupport(proto)
399 for entry in res:
399 for entry in res:
400 comp = entry.get(b'COMPRESSION')
400 comp = entry.get(b'COMPRESSION')
401 altcomp = util.compengines._bundlenames.get(comp)
401 altcomp = util.compengines._bundlenames.get(comp)
402 if comp and comp not in compformats and altcomp not in compformats:
402 if comp and comp not in compformats and altcomp not in compformats:
403 continue
403 continue
404 # No test yet for VERSION, since V2 is supported by any client
404 # No test yet for VERSION, since V2 is supported by any client
405 # that advertises partial pulls
405 # that advertises partial pulls
406 if b'heads' in entry:
406 if b'heads' in entry:
407 try:
407 try:
408 bundle_heads = decodehexstring(entry[b'heads'])
408 bundle_heads = decodehexstring(entry[b'heads'])
409 except TypeError:
409 except TypeError:
410 # Bad heads entry
410 # Bad heads entry
411 continue
411 continue
412 if bundle_heads.issubset(common):
412 if bundle_heads.issubset(common):
413 continue # Nothing new
413 continue # Nothing new
414 if all(cl.rev(rev) in common_anc for rev in bundle_heads):
414 if all(cl.rev(rev) in common_anc for rev in bundle_heads):
415 continue # Still nothing new
415 continue # Still nothing new
416 if any(
416 if any(
417 cl.rev(rev) not in heads_anc and cl.rev(rev) not in common_anc
417 cl.rev(rev) not in heads_anc and cl.rev(rev) not in common_anc
418 for rev in bundle_heads
418 for rev in bundle_heads
419 ):
419 ):
420 continue
420 continue
421 if b'bases' in entry:
421 if b'bases' in entry:
422 try:
422 try:
423 bundle_bases = decodehexstring(entry[b'bases'])
423 bundle_bases = decodehexstring(entry[b'bases'])
424 except TypeError:
424 except TypeError:
425 # Bad bases entry
425 # Bad bases entry
426 continue
426 continue
427 if not all(cl.rev(rev) in common_anc for rev in bundle_bases):
427 if not all(cl.rev(rev) in common_anc for rev in bundle_bases):
428 continue
428 continue
429 path = entry[b'URL']
429 path = entry[b'URL']
430 repo.ui.debug(b'sending pullbundle "%s"\n' % path)
430 repo.ui.debug(b'sending pullbundle "%s"\n' % path)
431 try:
431 try:
432 return repo.vfs.open(path)
432 return repo.vfs.open(path)
433 except IOError:
433 except IOError:
434 repo.ui.debug(b'pullbundle "%s" not accessible\n' % path)
434 repo.ui.debug(b'pullbundle "%s" not accessible\n' % path)
435 continue
435 continue
436 return None
436 return None
437
437
438
438
439 @wireprotocommand(b'getbundle', b'*', permission=b'pull')
439 @wireprotocommand(b'getbundle', b'*', permission=b'pull')
440 def getbundle(repo, proto, others):
440 def getbundle(repo, proto, others):
441 opts = options(
441 opts = options(
442 b'getbundle', wireprototypes.GETBUNDLE_ARGUMENTS.keys(), others
442 b'getbundle', wireprototypes.GETBUNDLE_ARGUMENTS.keys(), others
443 )
443 )
444 for k, v in pycompat.iteritems(opts):
444 for k, v in pycompat.iteritems(opts):
445 keytype = wireprototypes.GETBUNDLE_ARGUMENTS[k]
445 keytype = wireprototypes.GETBUNDLE_ARGUMENTS[k]
446 if keytype == b'nodes':
446 if keytype == b'nodes':
447 opts[k] = wireprototypes.decodelist(v)
447 opts[k] = wireprototypes.decodelist(v)
448 elif keytype == b'csv':
448 elif keytype == b'csv':
449 opts[k] = list(v.split(b','))
449 opts[k] = list(v.split(b','))
450 elif keytype == b'scsv':
450 elif keytype == b'scsv':
451 opts[k] = set(v.split(b','))
451 opts[k] = set(v.split(b','))
452 elif keytype == b'boolean':
452 elif keytype == b'boolean':
453 # Client should serialize False as '0', which is a non-empty string
453 # Client should serialize False as '0', which is a non-empty string
454 # so it evaluates as a True bool.
454 # so it evaluates as a True bool.
455 if v == b'0':
455 if v == b'0':
456 opts[k] = False
456 opts[k] = False
457 else:
457 else:
458 opts[k] = bool(v)
458 opts[k] = bool(v)
459 elif keytype != b'plain':
459 elif keytype != b'plain':
460 raise KeyError(b'unknown getbundle option type %s' % keytype)
460 raise KeyError(b'unknown getbundle option type %s' % keytype)
461
461
462 if not bundle1allowed(repo, b'pull'):
462 if not bundle1allowed(repo, b'pull'):
463 if not exchange.bundle2requested(opts.get(b'bundlecaps')):
463 if not exchange.bundle2requested(opts.get(b'bundlecaps')):
464 if proto.name == b'http-v1':
464 if proto.name == b'http-v1':
465 return wireprototypes.ooberror(bundle2required)
465 return wireprototypes.ooberror(bundle2required)
466 raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint)
466 raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint)
467
467
468 try:
468 try:
469 clheads = set(repo.changelog.heads())
469 clheads = set(repo.changelog.heads())
470 heads = set(opts.get(b'heads', set()))
470 heads = set(opts.get(b'heads', set()))
471 common = set(opts.get(b'common', set()))
471 common = set(opts.get(b'common', set()))
472 common.discard(nullid)
472 common.discard(nullid)
473 if (
473 if (
474 repo.ui.configbool(b'server', b'pullbundle')
474 repo.ui.configbool(b'server', b'pullbundle')
475 and b'partial-pull' in proto.getprotocaps()
475 and b'partial-pull' in proto.getprotocaps()
476 ):
476 ):
477 # Check if a pre-built bundle covers this request.
477 # Check if a pre-built bundle covers this request.
478 bundle = find_pullbundle(repo, proto, opts, clheads, heads, common)
478 bundle = find_pullbundle(repo, proto, opts, clheads, heads, common)
479 if bundle:
479 if bundle:
480 return wireprototypes.streamres(
480 return wireprototypes.streamres(
481 gen=util.filechunkiter(bundle), prefer_uncompressed=True
481 gen=util.filechunkiter(bundle), prefer_uncompressed=True
482 )
482 )
483
483
484 if repo.ui.configbool(b'server', b'disablefullbundle'):
484 if repo.ui.configbool(b'server', b'disablefullbundle'):
485 # Check to see if this is a full clone.
485 # Check to see if this is a full clone.
486 changegroup = opts.get(b'cg', True)
486 changegroup = opts.get(b'cg', True)
487 if changegroup and not common and clheads == heads:
487 if changegroup and not common and clheads == heads:
488 raise error.Abort(
488 raise error.Abort(
489 _(b'server has pull-based clones disabled'),
489 _(b'server has pull-based clones disabled'),
490 hint=_(b'remove --pull if specified or upgrade Mercurial'),
490 hint=_(b'remove --pull if specified or upgrade Mercurial'),
491 )
491 )
492
492
493 info, chunks = exchange.getbundlechunks(
493 info, chunks = exchange.getbundlechunks(
494 repo, b'serve', **pycompat.strkwargs(opts)
494 repo, b'serve', **pycompat.strkwargs(opts)
495 )
495 )
496 prefercompressed = info.get(b'prefercompressed', True)
496 prefercompressed = info.get(b'prefercompressed', True)
497 except error.Abort as exc:
497 except error.Abort as exc:
498 # cleanly forward Abort error to the client
498 # cleanly forward Abort error to the client
499 if not exchange.bundle2requested(opts.get(b'bundlecaps')):
499 if not exchange.bundle2requested(opts.get(b'bundlecaps')):
500 if proto.name == b'http-v1':
500 if proto.name == b'http-v1':
501 return wireprototypes.ooberror(exc.message + b'\n')
501 return wireprototypes.ooberror(exc.message + b'\n')
502 raise # cannot do better for bundle1 + ssh
502 raise # cannot do better for bundle1 + ssh
503 # bundle2 request expect a bundle2 reply
503 # bundle2 request expect a bundle2 reply
504 bundler = bundle2.bundle20(repo.ui)
504 bundler = bundle2.bundle20(repo.ui)
505 manargs = [(b'message', exc.message)]
505 manargs = [(b'message', exc.message)]
506 advargs = []
506 advargs = []
507 if exc.hint is not None:
507 if exc.hint is not None:
508 advargs.append((b'hint', exc.hint))
508 advargs.append((b'hint', exc.hint))
509 bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
509 bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
510 chunks = bundler.getchunks()
510 chunks = bundler.getchunks()
511 prefercompressed = False
511 prefercompressed = False
512
512
513 return wireprototypes.streamres(
513 return wireprototypes.streamres(
514 gen=chunks, prefer_uncompressed=not prefercompressed
514 gen=chunks, prefer_uncompressed=not prefercompressed
515 )
515 )
516
516
517
517
518 @wireprotocommand(b'heads', permission=b'pull')
518 @wireprotocommand(b'heads', permission=b'pull')
519 def heads(repo, proto):
519 def heads(repo, proto):
520 h = repo.heads()
520 h = repo.heads()
521 return wireprototypes.bytesresponse(wireprototypes.encodelist(h) + b'\n')
521 return wireprototypes.bytesresponse(wireprototypes.encodelist(h) + b'\n')
522
522
523
523
524 @wireprotocommand(b'hello', permission=b'pull')
524 @wireprotocommand(b'hello', permission=b'pull')
525 def hello(repo, proto):
525 def hello(repo, proto):
526 """Called as part of SSH handshake to obtain server info.
526 """Called as part of SSH handshake to obtain server info.
527
527
528 Returns a list of lines describing interesting things about the
528 Returns a list of lines describing interesting things about the
529 server, in an RFC822-like format.
529 server, in an RFC822-like format.
530
530
531 Currently, the only one defined is ``capabilities``, which consists of a
531 Currently, the only one defined is ``capabilities``, which consists of a
532 line of space separated tokens describing server abilities:
532 line of space separated tokens describing server abilities:
533
533
534 capabilities: <token0> <token1> <token2>
534 capabilities: <token0> <token1> <token2>
535 """
535 """
536 caps = capabilities(repo, proto).data
536 caps = capabilities(repo, proto).data
537 return wireprototypes.bytesresponse(b'capabilities: %s\n' % caps)
537 return wireprototypes.bytesresponse(b'capabilities: %s\n' % caps)
538
538
539
539
540 @wireprotocommand(b'listkeys', b'namespace', permission=b'pull')
540 @wireprotocommand(b'listkeys', b'namespace', permission=b'pull')
541 def listkeys(repo, proto, namespace):
541 def listkeys(repo, proto, namespace):
542 d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
542 d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
543 return wireprototypes.bytesresponse(pushkeymod.encodekeys(d))
543 return wireprototypes.bytesresponse(pushkeymod.encodekeys(d))
544
544
545
545
546 @wireprotocommand(b'lookup', b'key', permission=b'pull')
546 @wireprotocommand(b'lookup', b'key', permission=b'pull')
547 def lookup(repo, proto, key):
547 def lookup(repo, proto, key):
548 try:
548 try:
549 k = encoding.tolocal(key)
549 k = encoding.tolocal(key)
550 n = repo.lookup(k)
550 n = repo.lookup(k)
551 r = hex(n)
551 r = hex(n)
552 success = 1
552 success = 1
553 except Exception as inst:
553 except Exception as inst:
554 r = stringutil.forcebytestr(inst)
554 r = stringutil.forcebytestr(inst)
555 success = 0
555 success = 0
556 return wireprototypes.bytesresponse(b'%d %s\n' % (success, r))
556 return wireprototypes.bytesresponse(b'%d %s\n' % (success, r))
557
557
558
558
559 @wireprotocommand(b'known', b'nodes *', permission=b'pull')
559 @wireprotocommand(b'known', b'nodes *', permission=b'pull')
560 def known(repo, proto, nodes, others):
560 def known(repo, proto, nodes, others):
561 v = b''.join(
561 v = b''.join(
562 b and b'1' or b'0' for b in repo.known(wireprototypes.decodelist(nodes))
562 b and b'1' or b'0' for b in repo.known(wireprototypes.decodelist(nodes))
563 )
563 )
564 return wireprototypes.bytesresponse(v)
564 return wireprototypes.bytesresponse(v)
565
565
566
566
567 @wireprotocommand(b'protocaps', b'caps', permission=b'pull')
567 @wireprotocommand(b'protocaps', b'caps', permission=b'pull')
568 def protocaps(repo, proto, caps):
568 def protocaps(repo, proto, caps):
569 if proto.name == wireprototypes.SSHV1:
569 if proto.name == wireprototypes.SSHV1:
570 proto._protocaps = set(caps.split(b' '))
570 proto._protocaps = set(caps.split(b' '))
571 return wireprototypes.bytesresponse(b'OK')
571 return wireprototypes.bytesresponse(b'OK')
572
572
573
573
574 @wireprotocommand(b'pushkey', b'namespace key old new', permission=b'push')
574 @wireprotocommand(b'pushkey', b'namespace key old new', permission=b'push')
575 def pushkey(repo, proto, namespace, key, old, new):
575 def pushkey(repo, proto, namespace, key, old, new):
576 # compatibility with pre-1.8 clients which were accidentally
576 # compatibility with pre-1.8 clients which were accidentally
577 # sending raw binary nodes rather than utf-8-encoded hex
577 # sending raw binary nodes rather than utf-8-encoded hex
578 if len(new) == 20 and stringutil.escapestr(new) != new:
578 if len(new) == 20 and stringutil.escapestr(new) != new:
579 # looks like it could be a binary node
579 # looks like it could be a binary node
580 try:
580 try:
581 new.decode('utf-8')
581 new.decode('utf-8')
582 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
582 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
583 except UnicodeDecodeError:
583 except UnicodeDecodeError:
584 pass # binary, leave unmodified
584 pass # binary, leave unmodified
585 else:
585 else:
586 new = encoding.tolocal(new) # normal path
586 new = encoding.tolocal(new) # normal path
587
587
588 with proto.mayberedirectstdio() as output:
588 with proto.mayberedirectstdio() as output:
589 r = (
589 r = (
590 repo.pushkey(
590 repo.pushkey(
591 encoding.tolocal(namespace),
591 encoding.tolocal(namespace),
592 encoding.tolocal(key),
592 encoding.tolocal(key),
593 encoding.tolocal(old),
593 encoding.tolocal(old),
594 new,
594 new,
595 )
595 )
596 or False
596 or False
597 )
597 )
598
598
599 output = output.getvalue() if output else b''
599 output = output.getvalue() if output else b''
600 return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output))
600 return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output))
601
601
602
602
603 @wireprotocommand(b'stream_out', permission=b'pull')
603 @wireprotocommand(b'stream_out', permission=b'pull')
604 def stream(repo, proto):
604 def stream(repo, proto):
605 '''If the server supports streaming clone, it advertises the "stream"
605 '''If the server supports streaming clone, it advertises the "stream"
606 capability with a value representing the version and flags of the repo
606 capability with a value representing the version and flags of the repo
607 it is serving. Client checks to see if it understands the format.
607 it is serving. Client checks to see if it understands the format.
608 '''
608 '''
609 return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo))
609 return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo))
610
610
611
611
612 @wireprotocommand(b'unbundle', b'heads', permission=b'push')
612 @wireprotocommand(b'unbundle', b'heads', permission=b'push')
613 def unbundle(repo, proto, heads):
613 def unbundle(repo, proto, heads):
614 their_heads = wireprototypes.decodelist(heads)
614 their_heads = wireprototypes.decodelist(heads)
615
615
616 with proto.mayberedirectstdio() as output:
616 with proto.mayberedirectstdio() as output:
617 try:
617 try:
618 exchange.check_heads(repo, their_heads, b'preparing changes')
618 exchange.check_heads(repo, their_heads, b'preparing changes')
619 cleanup = lambda: None
619 cleanup = lambda: None
620 try:
620 try:
621 payload = proto.getpayload()
621 payload = proto.getpayload()
622 if repo.ui.configbool(b'server', b'streamunbundle'):
622 if repo.ui.configbool(b'server', b'streamunbundle'):
623
623
624 def cleanup():
624 def cleanup():
625 # Ensure that the full payload is consumed, so
625 # Ensure that the full payload is consumed, so
626 # that the connection doesn't contain trailing garbage.
626 # that the connection doesn't contain trailing garbage.
627 for p in payload:
627 for p in payload:
628 pass
628 pass
629
629
630 fp = util.chunkbuffer(payload)
630 fp = util.chunkbuffer(payload)
631 else:
631 else:
632 # write bundle data to temporary file as it can be big
632 # write bundle data to temporary file as it can be big
633 fp, tempname = None, None
633 fp, tempname = None, None
634
634
635 def cleanup():
635 def cleanup():
636 if fp:
636 if fp:
637 fp.close()
637 fp.close()
638 if tempname:
638 if tempname:
639 os.unlink(tempname)
639 os.unlink(tempname)
640
640
641 fd, tempname = pycompat.mkstemp(prefix=b'hg-unbundle-')
641 fd, tempname = pycompat.mkstemp(prefix=b'hg-unbundle-')
642 repo.ui.debug(
642 repo.ui.debug(
643 b'redirecting incoming bundle to %s\n' % tempname
643 b'redirecting incoming bundle to %s\n' % tempname
644 )
644 )
645 fp = os.fdopen(fd, pycompat.sysstr(b'wb+'))
645 fp = os.fdopen(fd, pycompat.sysstr(b'wb+'))
646 for p in payload:
646 for p in payload:
647 fp.write(p)
647 fp.write(p)
648 fp.seek(0)
648 fp.seek(0)
649
649
650 gen = exchange.readbundle(repo.ui, fp, None)
650 gen = exchange.readbundle(repo.ui, fp, None)
651 if isinstance(
651 if isinstance(
652 gen, changegroupmod.cg1unpacker
652 gen, changegroupmod.cg1unpacker
653 ) and not bundle1allowed(repo, b'push'):
653 ) and not bundle1allowed(repo, b'push'):
654 if proto.name == b'http-v1':
654 if proto.name == b'http-v1':
655 # need to special case http because stderr do not get to
655 # need to special case http because stderr do not get to
656 # the http client on failed push so we need to abuse
656 # the http client on failed push so we need to abuse
657 # some other error type to make sure the message get to
657 # some other error type to make sure the message get to
658 # the user.
658 # the user.
659 return wireprototypes.ooberror(bundle2required)
659 return wireprototypes.ooberror(bundle2required)
660 raise error.Abort(
660 raise error.Abort(
661 bundle2requiredmain, hint=bundle2requiredhint
661 bundle2requiredmain, hint=bundle2requiredhint
662 )
662 )
663
663
664 r = exchange.unbundle(
664 r = exchange.unbundle(
665 repo, gen, their_heads, b'serve', proto.client()
665 repo, gen, their_heads, b'serve', proto.client()
666 )
666 )
667 if util.safehasattr(r, b'addpart'):
667 if util.safehasattr(r, b'addpart'):
668 # The return looks streamable, we are in the bundle2 case
668 # The return looks streamable, we are in the bundle2 case
669 # and should return a stream.
669 # and should return a stream.
670 return wireprototypes.streamreslegacy(gen=r.getchunks())
670 return wireprototypes.streamreslegacy(gen=r.getchunks())
671 return wireprototypes.pushres(
671 return wireprototypes.pushres(
672 r, output.getvalue() if output else b''
672 r, output.getvalue() if output else b''
673 )
673 )
674
674
675 finally:
675 finally:
676 cleanup()
676 cleanup()
677
677
678 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
678 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
679 # handle non-bundle2 case first
679 # handle non-bundle2 case first
680 if not getattr(exc, 'duringunbundle2', False):
680 if not getattr(exc, 'duringunbundle2', False):
681 try:
681 try:
682 raise
682 raise
683 except error.Abort as exc:
683 except error.Abort as exc:
684 # The old code we moved used procutil.stderr directly.
684 # The old code we moved used procutil.stderr directly.
685 # We did not change it to minimise code change.
685 # We did not change it to minimise code change.
686 # This need to be moved to something proper.
686 # This need to be moved to something proper.
687 # Feel free to do it.
687 # Feel free to do it.
688 procutil.stderr.write(b"abort: %s\n" % exc.message)
688 procutil.stderr.write(exc.format())
689 if exc.hint is not None:
690 procutil.stderr.write(b"(%s)\n" % exc.hint)
691 procutil.stderr.flush()
689 procutil.stderr.flush()
692 return wireprototypes.pushres(
690 return wireprototypes.pushres(
693 0, output.getvalue() if output else b''
691 0, output.getvalue() if output else b''
694 )
692 )
695 except error.PushRaced:
693 except error.PushRaced:
696 return wireprototypes.pusherr(
694 return wireprototypes.pusherr(
697 pycompat.bytestr(exc),
695 pycompat.bytestr(exc),
698 output.getvalue() if output else b'',
696 output.getvalue() if output else b'',
699 )
697 )
700
698
701 bundler = bundle2.bundle20(repo.ui)
699 bundler = bundle2.bundle20(repo.ui)
702 for out in getattr(exc, '_bundle2salvagedoutput', ()):
700 for out in getattr(exc, '_bundle2salvagedoutput', ()):
703 bundler.addpart(out)
701 bundler.addpart(out)
704 try:
702 try:
705 try:
703 try:
706 raise
704 raise
707 except error.PushkeyFailed as exc:
705 except error.PushkeyFailed as exc:
708 # check client caps
706 # check client caps
709 remotecaps = getattr(exc, '_replycaps', None)
707 remotecaps = getattr(exc, '_replycaps', None)
710 if (
708 if (
711 remotecaps is not None
709 remotecaps is not None
712 and b'pushkey' not in remotecaps.get(b'error', ())
710 and b'pushkey' not in remotecaps.get(b'error', ())
713 ):
711 ):
714 # no support remote side, fallback to Abort handler.
712 # no support remote side, fallback to Abort handler.
715 raise
713 raise
716 part = bundler.newpart(b'error:pushkey')
714 part = bundler.newpart(b'error:pushkey')
717 part.addparam(b'in-reply-to', exc.partid)
715 part.addparam(b'in-reply-to', exc.partid)
718 if exc.namespace is not None:
716 if exc.namespace is not None:
719 part.addparam(
717 part.addparam(
720 b'namespace', exc.namespace, mandatory=False
718 b'namespace', exc.namespace, mandatory=False
721 )
719 )
722 if exc.key is not None:
720 if exc.key is not None:
723 part.addparam(b'key', exc.key, mandatory=False)
721 part.addparam(b'key', exc.key, mandatory=False)
724 if exc.new is not None:
722 if exc.new is not None:
725 part.addparam(b'new', exc.new, mandatory=False)
723 part.addparam(b'new', exc.new, mandatory=False)
726 if exc.old is not None:
724 if exc.old is not None:
727 part.addparam(b'old', exc.old, mandatory=False)
725 part.addparam(b'old', exc.old, mandatory=False)
728 if exc.ret is not None:
726 if exc.ret is not None:
729 part.addparam(b'ret', exc.ret, mandatory=False)
727 part.addparam(b'ret', exc.ret, mandatory=False)
730 except error.BundleValueError as exc:
728 except error.BundleValueError as exc:
731 errpart = bundler.newpart(b'error:unsupportedcontent')
729 errpart = bundler.newpart(b'error:unsupportedcontent')
732 if exc.parttype is not None:
730 if exc.parttype is not None:
733 errpart.addparam(b'parttype', exc.parttype)
731 errpart.addparam(b'parttype', exc.parttype)
734 if exc.params:
732 if exc.params:
735 errpart.addparam(b'params', b'\0'.join(exc.params))
733 errpart.addparam(b'params', b'\0'.join(exc.params))
736 except error.Abort as exc:
734 except error.Abort as exc:
737 manargs = [(b'message', exc.message)]
735 manargs = [(b'message', exc.message)]
738 advargs = []
736 advargs = []
739 if exc.hint is not None:
737 if exc.hint is not None:
740 advargs.append((b'hint', exc.hint))
738 advargs.append((b'hint', exc.hint))
741 bundler.addpart(
739 bundler.addpart(
742 bundle2.bundlepart(b'error:abort', manargs, advargs)
740 bundle2.bundlepart(b'error:abort', manargs, advargs)
743 )
741 )
744 except error.PushRaced as exc:
742 except error.PushRaced as exc:
745 bundler.newpart(
743 bundler.newpart(
746 b'error:pushraced',
744 b'error:pushraced',
747 [(b'message', stringutil.forcebytestr(exc))],
745 [(b'message', stringutil.forcebytestr(exc))],
748 )
746 )
749 return wireprototypes.streamreslegacy(gen=bundler.getchunks())
747 return wireprototypes.streamreslegacy(gen=bundler.getchunks())
General Comments 0
You need to be logged in to leave comments. Login now