errors: catch the new Error class in scmutil and chgserver...
Martin von Zweigbergk
r48073:7a769ac4 default
--- a/mercurial/chgserver.py
+++ b/mercurial/chgserver.py
@@ -1,761 +1,761 @@
# chgserver.py - command server extension for cHg
#
# Copyright 2011 Yuya Nishihara <yuya@tcha.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""command server extension for cHg

'S' channel (read/write)
    propagate ui.system() request to client

'attachio' command
    attach client's stdio passed by sendmsg()

'chdir' command
    change current directory

'setenv' command
    replace os.environ completely

'setumask' command (DEPRECATED)
'setumask2' command
    set umask

'validate' command
    reload the config and check if the server is up to date

Config
------

::

  [chgserver]
  # how long (in seconds) an idle chg server waits before exiting
  idletimeout = 3600

  # whether to skip config or env change checks
  skiphash = False
"""

from __future__ import absolute_import

import inspect
import os
import re
import socket
import stat
import struct
import time

from .i18n import _
from .pycompat import (
    getattr,
    setattr,
)
from .node import hex

from . import (
    commandserver,
    encoding,
    error,
    extensions,
    pycompat,
    util,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)


def _hashlist(items):
    """return sha1 hexdigest for a list"""
    return hex(hashutil.sha1(stringutil.pprint(items)).digest())


# sensitive config sections affecting confighash
_configsections = [
    b'alias',  # affects global state commands.table
    b'diff-tools',  # affects whether gui or not in extdiff's uisetup
    b'eol',  # uses setconfig('eol', ...)
    b'extdiff',  # uisetup will register new commands
    b'extensions',
    b'fastannotate',  # affects annotate command and adds fastannotate cmd
    b'merge-tools',  # affects whether gui or not in extdiff's uisetup
    b'schemes',  # extsetup will update global hg.schemes
]

_configsectionitems = [
    (b'commands', b'show.aliasprefix'),  # show.py reads it in extsetup
]

# sensitive environment variables affecting confighash
_envre = re.compile(
    br'''\A(?:
                CHGHG
                |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
                |HG(?:ENCODING|PLAIN).*
                |LANG(?:UAGE)?
                |LC_.*
                |LD_.*
                |PATH
                |PYTHON.*
                |TERM(?:INFO)?
                |TZ
            )\Z''',
    re.X,
)


def _confighash(ui):
    """return a quick hash for detecting config/env changes

    confighash is the hash of sensitive config items and environment variables.

    for chgserver, it is designed that once confighash changes, the server is
    not qualified to serve its client and should redirect the client to a new
    server. different from mtimehash, confighash change will not mark the
    server outdated and exit since the user can have different configs at the
    same time.
    """
    sectionitems = []
    for section in _configsections:
        sectionitems.append(ui.configitems(section))
    for section, item in _configsectionitems:
        sectionitems.append(ui.config(section, item))
    sectionhash = _hashlist(sectionitems)
    # If $CHGHG is set, the change to $HG should not trigger a new chg server
    if b'CHGHG' in encoding.environ:
        ignored = {b'HG'}
    else:
        ignored = set()
    envitems = [
        (k, v)
        for k, v in pycompat.iteritems(encoding.environ)
        if _envre.match(k) and k not in ignored
    ]
    envhash = _hashlist(sorted(envitems))
    return sectionhash[:6] + envhash[:6]

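The 12-character value assembled above (two 6-character prefixes) is what ends up embedded in the server's socket name via _hashaddress further down in this file. A standalone sketch of that naming rule, with made-up values for illustration:

import os

def hashaddress(address, hashstr):
    # same splitting rule as _hashaddress below: keep the basename up to
    # the first '.', then append '-<hash>'
    dirname, basename = os.path.split(address)
    basename = basename.split('.', 1)[0]
    return '%s-%s' % (os.path.join(dirname, basename), hashstr)

# hypothetical base address and confighash, for illustration only
print(hashaddress('/tmp/chg-socketdir/server', 'a1b2c3d4e5f6'))
# -> /tmp/chg-socketdir/server-a1b2c3d4e5f6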
def _getmtimepaths(ui):
    """get a list of paths that should be checked to detect change

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - python binary
    """
    modules = [m for n, m in extensions.extensions(ui)]
    try:
        from . import __version__

        modules.append(__version__)
    except ImportError:
        pass
    files = []
    if pycompat.sysexecutable:
        files.append(pycompat.sysexecutable)
    for m in modules:
        try:
            files.append(pycompat.fsencode(inspect.getabsfile(m)))
        except TypeError:
            pass
    return sorted(set(files))


def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash calls stat on the given paths and calculates a hash based on size
    and mtime of each file. mtimehash does not read file content because
    reading is expensive. therefore it's not 100% reliable for detecting
    content changes.
    it's possible to return different hashes for the same file contents.
    it's also possible to return the same hash for different file contents in
    some carefully crafted situations.

    for chgserver, it is designed that once mtimehash changes, the server is
    considered outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because we only know the paths of
    extensions after importing them (there is imp.find_module but that faces
    race conditions). We need to calculate confighash without importing.
    """

    def trystat(path):
        try:
            st = os.stat(path)
            return (st[stat.ST_MTIME], st.st_size)
        except OSError:
            # could be ENOENT, EPERM etc. not fatal in any case
            pass

    return _hashlist(pycompat.maplist(trystat, paths))[:12]

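Because _hashlist above feeds stringutil.pprint of the whole list into SHA-1, the (mtime, size) tuples and the None placeholders from failed stats all land in a single digest. A minimal standalone sketch of the same idea using only the standard library (repr stands in for stringutil.pprint; the paths are arbitrary examples):

import hashlib
import os
import stat

def trystat(path):
    # like trystat above: unreadable paths hash as None instead of aborting
    try:
        st = os.stat(path)
        return (st[stat.ST_MTIME], st.st_size)
    except OSError:
        return None

paths = ['/bin/sh', '/no/such/file']
stats = [trystat(p) for p in paths]
digest = hashlib.sha1(repr(stats).encode()).hexdigest()
print(digest[:12])  # chg keeps only the first 12 hex characters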
class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""

    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        confighash = _confighash(ui)
        mtimehash = _mtimehash(mtimepaths)
        ui.log(
            b'cmdserver',
            b'confighash = %s mtimehash = %s\n',
            confighash,
            mtimehash,
        )
        return hashstate(confighash, mtimehash, mtimepaths)


def _newchgui(srcui, csystem, attachio):
    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            if src:
                self._csystem = getattr(src, '_csystem', csystem)
            else:
                self._csystem = csystem

        def _runsystem(self, cmd, environ, cwd, out):
            # fallback to the original system method if
            # a. the output stream is not stdout (e.g. stderr, cStringIO),
            # b. or stdout is redirected by protectfinout(),
            # because the chg client is not aware of these situations and
            # will behave differently (i.e. write to stdout).
            if (
                out is not self.fout
                or not util.safehasattr(self.fout, b'fileno')
                or self.fout.fileno() != procutil.stdout.fileno()
                or self._finoutredirected
            ):
                return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
            self.flush()
            return self._csystem(cmd, procutil.shellenviron(environ), cwd)

        def _runpager(self, cmd, env=None):
            self._csystem(
                cmd,
                procutil.shellenviron(env),
                type=b'pager',
                cmdtable={b'attachio': attachio},
            )
            return True

    return chgui(srcui)


def _loadnewui(srcui, args, cdebug):
    from . import dispatch  # avoid cycle

    newui = srcui.__class__.load()
    for a in [b'fin', b'fout', b'ferr', b'environ']:
        setattr(newui, a, getattr(srcui, a))
    if util.safehasattr(srcui, b'_csystem'):
        newui._csystem = srcui._csystem

    # command line args
    options = dispatch._earlyparseopts(newui, args)
    dispatch._parseconfig(newui, options[b'config'])

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if b':' in source or source == b'--config' or source.startswith(b'$'):
            # path:line or command line, or environ
            continue
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwd = options[b'cwd']
    cwd = cwd and os.path.realpath(cwd) or None
    rpath = options[b'repository']
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    extensions.populateui(newui)
    commandserver.setuplogging(newui, fp=cdebug)
    if newui is not newlui:
        extensions.populateui(newlui)
        commandserver.setuplogging(newlui, fp=cdebug)

    return (newui, newlui)


class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    type, '\0',
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    if type == 'system', waits for:

    exitcode length (unsigned int),
    exitcode (int)

    if type == 'pager', repeatedly waits for a command name ending with '\n'
    and executes the matching cmdtable entry, or exits the loop if the command
    name is empty.
315 """
315 """
316
316
317 def __init__(self, in_, out, channel):
317 def __init__(self, in_, out, channel):
318 self.in_ = in_
318 self.in_ = in_
319 self.out = out
319 self.out = out
320 self.channel = channel
320 self.channel = channel
321
321
322 def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
322 def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
323 args = [type, cmd, os.path.abspath(cwd or b'.')]
323 args = [type, cmd, os.path.abspath(cwd or b'.')]
324 args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
324 args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
325 data = b'\0'.join(args)
325 data = b'\0'.join(args)
326 self.out.write(struct.pack(b'>cI', self.channel, len(data)))
326 self.out.write(struct.pack(b'>cI', self.channel, len(data)))
327 self.out.write(data)
327 self.out.write(data)
328 self.out.flush()
328 self.out.flush()
329
329
330 if type == b'system':
330 if type == b'system':
331 length = self.in_.read(4)
331 length = self.in_.read(4)
332 (length,) = struct.unpack(b'>I', length)
332 (length,) = struct.unpack(b'>I', length)
333 if length != 4:
333 if length != 4:
334 raise error.Abort(_(b'invalid response'))
334 raise error.Abort(_(b'invalid response'))
335 (rc,) = struct.unpack(b'>i', self.in_.read(4))
335 (rc,) = struct.unpack(b'>i', self.in_.read(4))
336 return rc
336 return rc
337 elif type == b'pager':
337 elif type == b'pager':
338 while True:
338 while True:
339 cmd = self.in_.readline()[:-1]
339 cmd = self.in_.readline()[:-1]
340 if not cmd:
340 if not cmd:
341 break
341 break
342 if cmdtable and cmd in cmdtable:
342 if cmdtable and cmd in cmdtable:
343 cmdtable[cmd]()
343 cmdtable[cmd]()
344 else:
344 else:
345 raise error.Abort(_(b'unexpected command: %s') % cmd)
345 raise error.Abort(_(b'unexpected command: %s') % cmd)
346 else:
346 else:
347 raise error.ProgrammingError(b'invalid S channel type: %s' % type)
347 raise error.ProgrammingError(b'invalid S channel type: %s' % type)
348
348
349
349
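The framing described in the docstring above is plain struct packing plus NUL-joined fields. A self-contained sketch of how a 'system' request is framed and how its reply is parsed, with toy values standing in for real client traffic:

import struct

channel = b'S'
args = [b'system', b'ls -l', b'/tmp', b'PATH=/usr/bin', b'LANG=C']
data = b'\0'.join(args)
# header is a 1-byte channel id plus a big-endian payload length
request = struct.pack(b'>cI', channel, len(data)) + data

# for type == 'system' the reply is a length field (always 4) followed
# by a signed big-endian exit code
reply = struct.pack(b'>I', 4) + struct.pack(b'>i', 0)
(length,) = struct.unpack(b'>I', reply[:4])
(rc,) = struct.unpack(b'>i', reply[4:8])
assert length == 4
print(rc)  # -> 0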
_iochannels = [
    # server.ch, ui.fp, mode
    (b'cin', b'fin', 'rb'),
    (b'cout', b'fout', 'wb'),
    (b'cerr', b'ferr', 'wb'),
]


class chgcmdserver(commandserver.server):
    def __init__(
        self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
    ):
        super(chgcmdserver, self).__init__(
            _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
            repo,
            fin,
            fout,
            prereposetups,
        )
        self.clientsock = sock
        self._ioattached = False
        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
        self.hashstate = hashstate
        self.baseaddress = baseaddress
        if hashstate is not None:
            self.capabilities = self.capabilities.copy()
            self.capabilities[b'validate'] = chgcmdserver.validate

    def cleanup(self):
        super(chgcmdserver, self).cleanup()
        # dispatch._runcatch() does not flush outputs if exception is not
        # handled by dispatch._dispatch()
        self.ui.flush()
        self._restoreio()
        self._ioattached = False

    def attachio(self):
        """Attach to client's stdio passed via unix domain socket; all
        channels except cresult will no longer be used
        """
        # tell client to sendmsg() with 1-byte payload, which makes it
        # distinctive from "attachio\n" command consumed by client.read()
        self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
        clientfds = util.recvfds(self.clientsock.fileno())
        self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)

        ui = self.ui
        ui.flush()
        self._saveio()
        for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
            assert fd > 0
            fp = getattr(ui, fn)
            os.dup2(fd, fp.fileno())
            os.close(fd)
            if self._ioattached:
                continue
            # reset buffering mode when client is first attached. as we want
            # to see output immediately on pager, the mode stays unchanged
            # when client re-attached. ferr is unchanged because it should
            # be unbuffered no matter if it is a tty or not.
            if fn == b'ferr':
                newfp = fp
            elif pycompat.ispy3:
                # On Python 3, the standard library doesn't offer line-buffered
                # binary streams, so wrap/unwrap it.
                if fp.isatty():
                    newfp = procutil.make_line_buffered(fp)
                else:
                    newfp = procutil.unwrap_line_buffered(fp)
            else:
                # Python 2 uses the I/O streams provided by the C library, so
                # make it line-buffered explicitly. Otherwise the default would
                # be decided on first write(), where fout could be a pager.
                if fp.isatty():
                    bufsize = 1  # line buffered
                else:
                    bufsize = -1  # system default
                newfp = os.fdopen(fp.fileno(), mode, bufsize)
            if newfp is not fp:
                setattr(ui, fn, newfp)
            setattr(self, cn, newfp)

        self._ioattached = True
        self.cresult.write(struct.pack(b'>i', len(clientfds)))

    def _saveio(self):
        if self._oldios:
            return
        ui = self.ui
        for cn, fn, _mode in _iochannels:
            ch = getattr(self, cn)
            fp = getattr(ui, fn)
            fd = os.dup(fp.fileno())
            self._oldios.append((ch, fp, fd))

    def _restoreio(self):
        if not self._oldios:
            return
        nullfd = os.open(os.devnull, os.O_WRONLY)
        ui = self.ui
        for (ch, fp, fd), (cn, fn, mode) in zip(self._oldios, _iochannels):
            newfp = getattr(ui, fn)
            # On Python 2, newfp and fp may be separate file objects associated
            # with the same fd, so we must close newfp while it's associated
            # with the client. Otherwise the new associated fd would be closed
            # when newfp gets deleted. On Python 3, newfp is just a wrapper
            # around fp even if newfp is not fp, so deleting newfp is safe.
            if not (pycompat.ispy3 or newfp is fp):
                newfp.close()
            # restore original fd: fp is open again
            try:
                if (pycompat.ispy3 or newfp is fp) and 'w' in mode:
                    # Discard buffered data which couldn't be flushed because
                    # of EPIPE. The data should belong to the current session
                    # and should never persist.
                    os.dup2(nullfd, fp.fileno())
                    fp.flush()
                os.dup2(fd, fp.fileno())
            except OSError as err:
                # According to issue6330, running chg on heavy loaded systems
                # can lead to EBUSY. [man dup2] indicates that, on Linux,
                # EBUSY comes from a race condition between open() and dup2().
                # However it's not clear why open() race occurred for
                # newfd=stdin/out/err.
                self.ui.log(
                    b'chgserver',
                    b'got %s while duplicating %s\n',
                    stringutil.forcebytestr(err),
                    fn,
                )
            os.close(fd)
            setattr(self, cn, ch)
            setattr(ui, fn, fp)
        os.close(nullfd)
        del self._oldios[:]

    def validate(self):
        """Reload the config and check if the server is up to date

        Read a list of '\0' separated arguments.
        Write a non-empty list of '\0' separated instruction strings or '\0'
        if the list is empty.
        An instruction string could be either:
            - "unlink $path", the client should unlink the path to stop the
              outdated server.
            - "redirect $path", the client should attempt to connect to $path
              first. If it does not work, start a new server. It implies
              "reconnect".
            - "exit $n", the client should exit directly with code n.
              This may happen if we cannot parse the config.
            - "reconnect", the client should close the connection and
              reconnect.
        If neither "reconnect" nor "redirect" is included in the instruction
        list, the client can continue with this server after completing all
        the instructions.
        """
        args = self._readlist()
        errorraised = False
        detailed_exit_code = 255
        try:
            self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
        except error.RepoError as inst:
            # RepoError can be raised while trying to read shared source
            # configuration
            self.ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
            if inst.hint:
                self.ui.error(_(b"(%s)\n") % inst.hint)
            errorraised = True
-        except error.Abort as inst:
+        except error.Error as inst:
            if inst.detailed_exit_code is not None:
                detailed_exit_code = inst.detailed_exit_code
            self.ui.error(inst.format())
            errorraised = True

        if errorraised:
            self.ui.flush()
            exit_code = 255
            if self.ui.configbool(b'ui', b'detailed-exit-code'):
                exit_code = detailed_exit_code
            self.cresult.write(b'exit %d' % exit_code)
            return
        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
        insts = []
        if newhash.mtimehash != self.hashstate.mtimehash:
            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
            insts.append(b'unlink %s' % addr)
            # mtimehash is empty if one or more extensions fail to load.
            # to be compatible with hg, still serve the client this time.
            if self.hashstate.mtimehash:
                insts.append(b'reconnect')
        if newhash.confighash != self.hashstate.confighash:
            addr = _hashaddress(self.baseaddress, newhash.confighash)
            insts.append(b'redirect %s' % addr)
        self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
        self.cresult.write(b'\0'.join(insts) or b'\0')

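The -/+ pair above is the whole change on this side: the handler now catches error.Error, the new base class for exceptions that carry exit-code information, rather than only error.Abort (which subclasses it). A simplified sketch of the relationship the handler relies on; these are not the real definitions from Mercurial's error module:

class Error(Exception):
    # sketch: the real base class also handles hints and message formatting
    coarse_exit_code = None
    detailed_exit_code = None

    def format(self):
        return b'abort: ...\n'

class Abort(Error):
    # what this except clause used to be limited to
    pass

try:
    raise Abort()
except Error as inst:  # still catches Abort, plus its new siblings
    print(inst.detailed_exit_code)  # None here, so validate() keeps 255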
    def chdir(self):
        """Change current directory

        Note that the behavior of the --cwd option is a bit different from
        this. It does not affect the --config parameter.
551 """
551 """
552 path = self._readstr()
552 path = self._readstr()
553 if not path:
553 if not path:
554 return
554 return
555 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
555 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
556 os.chdir(path)
556 os.chdir(path)
557
557
558 def setumask(self):
558 def setumask(self):
559 """Change umask (DEPRECATED)"""
559 """Change umask (DEPRECATED)"""
560 # BUG: this does not follow the message frame structure, but kept for
560 # BUG: this does not follow the message frame structure, but kept for
561 # backward compatibility with old chg clients for some time
561 # backward compatibility with old chg clients for some time
562 self._setumask(self._read(4))
562 self._setumask(self._read(4))
563
563
564 def setumask2(self):
564 def setumask2(self):
565 """Change umask"""
565 """Change umask"""
566 data = self._readstr()
566 data = self._readstr()
567 if len(data) != 4:
567 if len(data) != 4:
568 raise ValueError(b'invalid mask length in setumask2 request')
568 raise ValueError(b'invalid mask length in setumask2 request')
569 self._setumask(data)
569 self._setumask(data)
570
570
571 def _setumask(self, data):
571 def _setumask(self, data):
572 mask = struct.unpack(b'>I', data)[0]
572 mask = struct.unpack(b'>I', data)[0]
573 self.ui.log(b'chgserver', b'setumask %r\n', mask)
573 self.ui.log(b'chgserver', b'setumask %r\n', mask)
574 util.setumask(mask)
574 util.setumask(mask)
575
575
576 def runcommand(self):
576 def runcommand(self):
577 # pager may be attached within the runcommand session, which should
577 # pager may be attached within the runcommand session, which should
578 # be detached at the end of the session. otherwise the pager wouldn't
578 # be detached at the end of the session. otherwise the pager wouldn't
579 # receive EOF.
579 # receive EOF.
580 globaloldios = self._oldios
580 globaloldios = self._oldios
581 self._oldios = []
581 self._oldios = []
582 try:
582 try:
583 return super(chgcmdserver, self).runcommand()
583 return super(chgcmdserver, self).runcommand()
584 finally:
584 finally:
585 self._restoreio()
585 self._restoreio()
586 self._oldios = globaloldios
586 self._oldios = globaloldios
587
587
588 def setenv(self):
588 def setenv(self):
589 """Clear and update os.environ
589 """Clear and update os.environ
590
590
        Note that not all variables can have an effect on the running process.
592 """
592 """
593 l = self._readlist()
593 l = self._readlist()
594 try:
594 try:
595 newenv = dict(s.split(b'=', 1) for s in l)
595 newenv = dict(s.split(b'=', 1) for s in l)
596 except ValueError:
596 except ValueError:
597 raise ValueError(b'unexpected value in setenv request')
597 raise ValueError(b'unexpected value in setenv request')
598 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
598 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
599
599
600 encoding.environ.clear()
600 encoding.environ.clear()
601 encoding.environ.update(newenv)
601 encoding.environ.update(newenv)
602
602
603 capabilities = commandserver.server.capabilities.copy()
603 capabilities = commandserver.server.capabilities.copy()
604 capabilities.update(
604 capabilities.update(
605 {
605 {
606 b'attachio': attachio,
606 b'attachio': attachio,
607 b'chdir': chdir,
607 b'chdir': chdir,
608 b'runcommand': runcommand,
608 b'runcommand': runcommand,
609 b'setenv': setenv,
609 b'setenv': setenv,
610 b'setumask': setumask,
610 b'setumask': setumask,
611 b'setumask2': setumask2,
611 b'setumask2': setumask2,
612 }
612 }
613 )
613 )
614
614
615 if util.safehasattr(procutil, b'setprocname'):
615 if util.safehasattr(procutil, b'setprocname'):
616
616
617 def setprocname(self):
617 def setprocname(self):
618 """Change process title"""
618 """Change process title"""
619 name = self._readstr()
619 name = self._readstr()
620 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
620 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
621 procutil.setprocname(name)
621 procutil.setprocname(name)
622
622
623 capabilities[b'setprocname'] = setprocname
623 capabilities[b'setprocname'] = setprocname
624
624
625
625
626 def _tempaddress(address):
626 def _tempaddress(address):
627 return b'%s.%d.tmp' % (address, os.getpid())
627 return b'%s.%d.tmp' % (address, os.getpid())
628
628
629
629
630 def _hashaddress(address, hashstr):
630 def _hashaddress(address, hashstr):
631 # if the basename of address contains '.', use only the left part. this
631 # if the basename of address contains '.', use only the left part. this
632 # makes it possible for the client to pass 'server.tmp$PID' and follow by
632 # makes it possible for the client to pass 'server.tmp$PID' and follow by
633 # an atomic rename to avoid locking when spawning new servers.
633 # an atomic rename to avoid locking when spawning new servers.
634 dirname, basename = os.path.split(address)
634 dirname, basename = os.path.split(address)
635 basename = basename.split(b'.', 1)[0]
635 basename = basename.split(b'.', 1)[0]
636 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
636 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
637
637
638
638
639 class chgunixservicehandler(object):
639 class chgunixservicehandler(object):
640 """Set of operations for chg services"""
640 """Set of operations for chg services"""
641
641
642 pollinterval = 1 # [sec]
642 pollinterval = 1 # [sec]
643
643
644 def __init__(self, ui):
644 def __init__(self, ui):
645 self.ui = ui
645 self.ui = ui
646 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
646 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
647 self._lastactive = time.time()
647 self._lastactive = time.time()
648
648
649 def bindsocket(self, sock, address):
649 def bindsocket(self, sock, address):
650 self._inithashstate(address)
650 self._inithashstate(address)
651 self._checkextensions()
651 self._checkextensions()
652 self._bind(sock)
652 self._bind(sock)
653 self._createsymlink()
653 self._createsymlink()
654 # no "listening at" message should be printed to simulate hg behavior
654 # no "listening at" message should be printed to simulate hg behavior
655
655
656 def _inithashstate(self, address):
656 def _inithashstate(self, address):
657 self._baseaddress = address
657 self._baseaddress = address
658 if self.ui.configbool(b'chgserver', b'skiphash'):
658 if self.ui.configbool(b'chgserver', b'skiphash'):
659 self._hashstate = None
659 self._hashstate = None
660 self._realaddress = address
660 self._realaddress = address
661 return
661 return
662 self._hashstate = hashstate.fromui(self.ui)
662 self._hashstate = hashstate.fromui(self.ui)
663 self._realaddress = _hashaddress(address, self._hashstate.confighash)
663 self._realaddress = _hashaddress(address, self._hashstate.confighash)
664
664
665 def _checkextensions(self):
665 def _checkextensions(self):
666 if not self._hashstate:
666 if not self._hashstate:
667 return
667 return
668 if extensions.notloaded():
668 if extensions.notloaded():
669 # one or more extensions failed to load. mtimehash becomes
669 # one or more extensions failed to load. mtimehash becomes
670 # meaningless because we do not know the paths of those extensions.
670 # meaningless because we do not know the paths of those extensions.
671 # set mtimehash to an illegal hash value to invalidate the server.
671 # set mtimehash to an illegal hash value to invalidate the server.
672 self._hashstate.mtimehash = b''
672 self._hashstate.mtimehash = b''
673
673
674 def _bind(self, sock):
674 def _bind(self, sock):
675 # use a unique temp address so we can stat the file and do ownership
675 # use a unique temp address so we can stat the file and do ownership
676 # check later
676 # check later
677 tempaddress = _tempaddress(self._realaddress)
677 tempaddress = _tempaddress(self._realaddress)
678 util.bindunixsocket(sock, tempaddress)
678 util.bindunixsocket(sock, tempaddress)
679 self._socketstat = os.stat(tempaddress)
679 self._socketstat = os.stat(tempaddress)
680 sock.listen(socket.SOMAXCONN)
680 sock.listen(socket.SOMAXCONN)
        # rename will atomically replace the old socket file, if it exists. the
        # old server will detect the ownership change and exit.
        util.rename(tempaddress, self._realaddress)

    def _createsymlink(self):
        if self._baseaddress == self._realaddress:
            return
        tempaddress = _tempaddress(self._baseaddress)
        os.symlink(os.path.basename(self._realaddress), tempaddress)
        util.rename(tempaddress, self._baseaddress)

    def _issocketowner(self):
        try:
            st = os.stat(self._realaddress)
            return (
                st.st_ino == self._socketstat.st_ino
                and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
            )
        except OSError:
            return False

    def unlinksocket(self, address):
        if not self._issocketowner():
            return
        # it is possible to have a race condition here that we may
        # remove another server's socket file. but that's okay
        # since that server will detect and exit automatically and
        # the client will start a new server on demand.
        util.tryunlink(self._realaddress)

    def shouldexit(self):
        if not self._issocketowner():
            self.ui.log(
                b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
            )
            return True
        if time.time() - self._lastactive > self._idletimeout:
            self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
            return True
        return False

    def newconnection(self):
        self._lastactive = time.time()

    def createcmdserver(self, repo, conn, fin, fout, prereposetups):
        return chgcmdserver(
            self.ui,
            repo,
            fin,
            fout,
            conn,
            prereposetups,
            self._hashstate,
            self._baseaddress,
        )


def chgunixservice(ui, repo, opts):
    # CHGINTERNALMARK is set by the chg client. It is an indication that things
    # are started by chg so other code can do things accordingly, like disabling
    # demandimport or detecting a chg client started by a chg client. When executed
    # here, CHGINTERNALMARK is no longer useful and hence dropped to make
    # environ cleaner.
    if b'CHGINTERNALMARK' in encoding.environ:
        del encoding.environ[b'CHGINTERNALMARK']
    # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
    # it thinks the current value is "C". This breaks the hash computation and
    # causes a chg restart loop.
    if b'CHGORIG_LC_CTYPE' in encoding.environ:
        encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
        del encoding.environ[b'CHGORIG_LC_CTYPE']
    elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
        if b'LC_CTYPE' in encoding.environ:
            del encoding.environ[b'LC_CTYPE']
        del encoding.environ[b'CHG_CLEAR_LC_CTYPE']

    if repo:
        # one chgserver can serve multiple repos. drop repo information
        ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
    h = chgunixservicehandler(ui)
    return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
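For context between the two files: the instruction strings that validate() writes (see its docstring above) are interpreted by the chg client, which is C code. A hypothetical Python rendering of that client-side loop, purely to make the protocol concrete:

def handle_instructions(data):
    # data is the result-channel payload: a single NUL when there are no
    # instructions, otherwise NUL-separated instruction strings
    if data == b'\0':
        return b'continue'
    action = b'continue'
    for inst in data.split(b'\0'):
        if inst.startswith(b'unlink '):
            print(b'would unlink: ' + inst[len(b'unlink '):])
        elif inst.startswith(b'redirect '):
            action = inst  # implies reconnecting to the new address
        elif inst.startswith(b'exit '):
            return inst
        elif inst == b'reconnect':
            action = b'reconnect'
    return action

print(handle_instructions(b'unlink /tmp/chg/server-oldhash\0reconnect'))
# prints the unlink notice, then b'reconnect'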
--- a/mercurial/scmutil.py
+++ b/mercurial/scmutil.py
@@ -1,2304 +1,2304 @@
# scmutil.py - Mercurial core utility functions
#
# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.StorageError as inst:
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
        detailed_exit_code = 50
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
208 except error.Abort as inst:
208 except error.Error as inst:
209 if inst.detailed_exit_code is not None:
209 if inst.detailed_exit_code is not None:
210 detailed_exit_code = inst.detailed_exit_code
210 detailed_exit_code = inst.detailed_exit_code
211 if inst.coarse_exit_code is not None:
211 if inst.coarse_exit_code is not None:
212 coarse_exit_code = inst.coarse_exit_code
212 coarse_exit_code = inst.coarse_exit_code
213 ui.error(inst.format())
213 ui.error(inst.format())
214 except error.WorkerError as inst:
214 except error.WorkerError as inst:
215 # Don't print a message -- the worker already should have
215 # Don't print a message -- the worker already should have
216 return inst.status_code
216 return inst.status_code
217 except ImportError as inst:
217 except ImportError as inst:
218 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
218 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
219 m = stringutil.forcebytestr(inst).split()[-1]
219 m = stringutil.forcebytestr(inst).split()[-1]
220 if m in b"mpatch bdiff".split():
220 if m in b"mpatch bdiff".split():
221 ui.error(_(b"(did you forget to compile extensions?)\n"))
221 ui.error(_(b"(did you forget to compile extensions?)\n"))
222 elif m in b"zlib".split():
222 elif m in b"zlib".split():
223 ui.error(_(b"(is your Python install correct?)\n"))
223 ui.error(_(b"(is your Python install correct?)\n"))
224 except util.urlerr.httperror as inst:
224 except util.urlerr.httperror as inst:
225 detailed_exit_code = 100
225 detailed_exit_code = 100
226 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
226 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
227 except util.urlerr.urlerror as inst:
227 except util.urlerr.urlerror as inst:
228 detailed_exit_code = 100
228 detailed_exit_code = 100
229 try: # usually it is in the form (errno, strerror)
229 try: # usually it is in the form (errno, strerror)
230 reason = inst.reason.args[1]
230 reason = inst.reason.args[1]
231 except (AttributeError, IndexError):
231 except (AttributeError, IndexError):
232 # it might be anything, for example a string
232 # it might be anything, for example a string
233 reason = inst.reason
233 reason = inst.reason
234 if isinstance(reason, pycompat.unicode):
234 if isinstance(reason, pycompat.unicode):
235 # SSLError of Python 2.7.9 contains a unicode
235 # SSLError of Python 2.7.9 contains a unicode
236 reason = encoding.unitolocal(reason)
236 reason = encoding.unitolocal(reason)
237 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
237 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
238 except (IOError, OSError) as inst:
238 except (IOError, OSError) as inst:
239 if (
239 if (
240 util.safehasattr(inst, b"args")
240 util.safehasattr(inst, b"args")
241 and inst.args
241 and inst.args
242 and inst.args[0] == errno.EPIPE
242 and inst.args[0] == errno.EPIPE
243 ):
243 ):
244 pass
244 pass
245 elif getattr(inst, "strerror", None): # common IOError or OSError
245 elif getattr(inst, "strerror", None): # common IOError or OSError
246 if getattr(inst, "filename", None) is not None:
246 if getattr(inst, "filename", None) is not None:
247 ui.error(
247 ui.error(
248 _(b"abort: %s: '%s'\n")
248 _(b"abort: %s: '%s'\n")
249 % (
249 % (
250 encoding.strtolocal(inst.strerror),
250 encoding.strtolocal(inst.strerror),
251 stringutil.forcebytestr(inst.filename),
251 stringutil.forcebytestr(inst.filename),
252 )
252 )
253 )
253 )
254 else:
254 else:
255 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
255 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
256 else: # suspicious IOError
256 else: # suspicious IOError
257 raise
257 raise
258 except MemoryError:
258 except MemoryError:
259 ui.error(_(b"abort: out of memory\n"))
259 ui.error(_(b"abort: out of memory\n"))
260 except SystemExit as inst:
260 except SystemExit as inst:
261 # Commands shouldn't sys.exit directly, but give a return code.
261 # Commands shouldn't sys.exit directly, but give a return code.
262 # Just in case catch this and and pass exit code to caller.
262 # Just in case catch this and and pass exit code to caller.
263 detailed_exit_code = 254
263 detailed_exit_code = 254
264 coarse_exit_code = inst.code
264 coarse_exit_code = inst.code
265
265
266 if ui.configbool(b'ui', b'detailed-exit-code'):
266 if ui.configbool(b'ui', b'detailed-exit-code'):
267 return detailed_exit_code
267 return detailed_exit_code
268 else:
268 else:
269 return coarse_exit_code
269 return coarse_exit_code
270
270
271
271
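# Usage sketch (editor's illustrative addition, not part of this changeset):
# callcatch() wraps an arbitrary callable and converts Mercurial errors into
# an exit code. `ui` is assumed to be an already-constructed ui object.
#
#     def _run():
#         raise error.StorageError(b'simulated storage failure')
#
#     status = callcatch(ui, _run)
#     # prints "abort: simulated storage failure"; returns 50 when
#     # ui.detailed-exit-code is enabled, -1 (unset) otherwise

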
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in [b'tip', b'.', b'null']:
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for c in (b':', b'\0', b'\n', b'\r'):
        if c in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(c)
            )
    try:
        int(lbl)
        raise error.InputError(_(b"cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )


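# Illustrative examples (editor's addition): names that checknewlabel()
# rejects. `repo` is assumed to be an open repository object.
#
#     checknewlabel(repo, b'tip', b'bookmark')    # InputError: reserved name
#     checknewlabel(repo, b'12345', b'bookmark')  # InputError: integer name
#     checknewlabel(repo, b'a:b', b'bookmark')    # InputError: ':' forbidden
#     checknewlabel(repo, b' pad ', b'bookmark')  # InputError: whitespace
#     checknewlabel(repo, b'feature-x', b'bookmark')  # accepted

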
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


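# Config sketch (editor's addition): how ui.portablefilenames maps to the
# (abort, warn) pair returned above, assuming a non-Windows platform:
#
#     [ui]
#     portablefilenames = warn    # -> (False, True): warn about 'aux.txt' etc.
#     portablefilenames = abort   # -> (True, False): refuse such names
#     portablefilenames = ignore  # -> (False, False): no checking
#
# On Windows, abort is always True because the OS itself rejects such names.

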
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


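# Usage sketch (editor's addition): auditing newly added files for case
# collisions. `repo` is assumed to be an open repository.
#
#     audit = casecollisionauditor(repo.ui, False, repo.dirstate)
#     audit(b'README')   # fine the first time
#     audit(b'readme')   # warns: possible case-folding collision for readme

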
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = cl._filteredrevs_hashcache.get(maxrev)
    if not key:
        revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            key = s.digest()
            cl._filteredrevs_hashcache[maxrev] = key
    return key


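# Usage sketch (editor's addition): a cache implementation can combine tip
# information with this hash to build a validation key. `repo` is assumed to
# be an open repository.
#
#     tiprev = len(repo.changelog) - 1
#     cachekey = (tiprev, repo.changelog.tip(), filteredhash(repo, tiprev))
#     # cachekey changes whenever the set of filtered revisions changes,
#     # even if tiprev/tipnode stay the same

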
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


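# Usage sketch (editor's addition): enumerating repositories below a root
# directory, e.g. for hgwebdir-style listings. Paths are bytes, matching the
# rest of this module; the root path is a hypothetical example.
#
#     for repo_root in walkrepos(b'/srv/hg', followsym=True, recurse=True):
#         print(repo_root)

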
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return ctx.repo().nodeconstants.wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))


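# Example output (editor's addition, with an illustrative made-up hash): at
# normal verbosity the node is shortened to 12 hex digits; with --debug the
# full hash is printed:
#
#     formatrevnode(ui, 4, node)  # -> b'4:2dc09a01254d'
#     # with ui.debugflag set     # -> b'4:' + full 40-digit hex node

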
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False


def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


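# Usage sketch (editor's addition): this is the kind of lookup the
# '{shortest(node)}' template builds on. A per-call cache dict avoids
# recomputing the disambiguation revset across many nodes:
#
#     cache = {}
#     for ctx in repo.set(b'draft()'):
#         prefix = shortesthexnodeidprefix(
#             repo, ctx.node(), minlength=4, cache=cache
#         )
#         print(prefix)

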
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)


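# Usage sketch (editor's addition): resolving user-supplied symbols. All of
# these return a changectx (or raise RepoLookupError for unknown symbols);
# the concrete values are illustrative:
#
#     revsymbol(repo, b'.')             # working directory parent
#     revsymbol(repo, b'42')            # revision number
#     revsymbol(repo, b'2dc09a01254d')  # node id prefix
#     revsymbol(repo, b'my-bookmark')   # bookmark/tag/branch via names API

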
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.Abort(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)


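# Usage sketch (editor's addition): combining several user-supplied revsets
# into one smartset, roughly what command implementations do with --rev:
#
#     revs = revrange(repo, [b'draft()', b'bookmark()'])
#     for rev in revs:
#         print(repo[rev])

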
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2


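# Behavior sketch (editor's addition): the generator yields geometrically
# growing window sizes, capped at sizelimit:
#
#     import itertools
#     list(itertools.islice(increasingwindows(), 9))
#     # -> [8, 16, 32, 64, 128, 256, 512, 512, 512]

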
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()


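# Usage sketch (editor's addition): the shape of a typical caller. `display`
# is a hypothetical output function; the matcher factory here just matches
# everything:
#
#     revs = repo.revs(b'reverse(all())')
#
#     def prepare(ctx, fmatch):
#         pass  # gather data for each ctx in the window, forward order
#
#     for ctx in walkchangerevs(repo, revs, lambda ctx: matchall(repo), prepare):
#         display(ctx)

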
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


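# Usage sketch (editor's addition): commands obtain a path formatter once and
# apply it to every repo-relative path they print; the path shown is an
# illustrative example:
#
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     ui.write(b'%s\n' % uipathfn(b'src/module/file.py'))
#     # prints a cwd-relative or repo-relative path per ui.relative-paths

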
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude, were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)


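# Config sketch (editor's addition): redirecting .orig backup files out of
# the working copy, then computing a backup location; the paths are
# illustrative:
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
#     backuppath(ui, repo, b'src/file.py')
#     # -> <repo>/.hg/origbackups/src/file.py instead of src/file.py.orig

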
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))


def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle
1139
1139
1140 repo.ui.debug(
1140 repo.ui.debug(
1141 b'moving bookmarks %r from %s to %s\n'
1141 b'moving bookmarks %r from %s to %s\n'
1142 % (
1142 % (
1143 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1143 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1144 hex(oldnode),
1144 hex(oldnode),
1145 hex(newnode),
1145 hex(newnode),
1146 )
1146 )
1147 )
1147 )
1148 # Delete divergent bookmarks being parents of related newnodes
1148 # Delete divergent bookmarks being parents of related newnodes
1149 deleterevs = repo.revs(
1149 deleterevs = repo.revs(
1150 b'parents(roots(%ln & (::%n))) - parents(%n)',
1150 b'parents(roots(%ln & (::%n))) - parents(%n)',
1151 allnewnodes,
1151 allnewnodes,
1152 newnode,
1152 newnode,
1153 oldnode,
1153 oldnode,
1154 )
1154 )
1155 deletenodes = _containsnode(repo, deleterevs)
1155 deletenodes = _containsnode(repo, deleterevs)
1156 for name in oldbmarks:
1156 for name in oldbmarks:
1157 bmarkchanges.append((name, newnode))
1157 bmarkchanges.append((name, newnode))
1158 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1158 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1159 bmarkchanges.append((b, None))
1159 bmarkchanges.append((b, None))
1160
1160
1161 if bmarkchanges:
1161 if bmarkchanges:
1162 bmarks.applychanges(repo, tr, bmarkchanges)
1162 bmarks.applychanges(repo, tr, bmarkchanges)
1163
1163
1164 for phase, nodes in toretract.items():
1164 for phase, nodes in toretract.items():
1165 phases.retractboundary(repo, tr, phase, nodes)
1165 phases.retractboundary(repo, tr, phase, nodes)
1166 for phase, nodes in toadvance.items():
1166 for phase, nodes in toadvance.items():
1167 phases.advanceboundary(repo, tr, phase, nodes)
1167 phases.advanceboundary(repo, tr, phase, nodes)
1168
1168
1169 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1169 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1170 # Obsolete or strip nodes
1170 # Obsolete or strip nodes
1171 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1171 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1172 # If a node is already obsoleted, and we want to obsolete it
1172 # If a node is already obsoleted, and we want to obsolete it
1173 # without a successor, skip that obssolete request since it's
1173 # without a successor, skip that obssolete request since it's
1174 # unnecessary. That's the "if s or not isobs(n)" check below.
1174 # unnecessary. That's the "if s or not isobs(n)" check below.
1175 # Also sort the node in topology order, that might be useful for
1175 # Also sort the node in topology order, that might be useful for
1176 # some obsstore logic.
1176 # some obsstore logic.
1177 # NOTE: the sorting might belong to createmarkers.
1177 # NOTE: the sorting might belong to createmarkers.
1178 torev = unfi.changelog.rev
1178 torev = unfi.changelog.rev
1179 sortfunc = lambda ns: torev(ns[0][0])
1179 sortfunc = lambda ns: torev(ns[0][0])
1180 rels = []
1180 rels = []
1181 for ns, s in sorted(replacements.items(), key=sortfunc):
1181 for ns, s in sorted(replacements.items(), key=sortfunc):
1182 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1182 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1183 rels.append(rel)
1183 rels.append(rel)
1184 if rels:
1184 if rels:
1185 obsolete.createmarkers(
1185 obsolete.createmarkers(
1186 repo, rels, operation=operation, metadata=metadata
1186 repo, rels, operation=operation, metadata=metadata
1187 )
1187 )
1188 elif phases.supportinternal(repo) and mayusearchived:
1188 elif phases.supportinternal(repo) and mayusearchived:
1189 # this assume we do not have "unstable" nodes above the cleaned ones
1189 # this assume we do not have "unstable" nodes above the cleaned ones
1190 allreplaced = set()
1190 allreplaced = set()
1191 for ns in replacements.keys():
1191 for ns in replacements.keys():
1192 allreplaced.update(ns)
1192 allreplaced.update(ns)
1193 if backup:
1193 if backup:
1194 from . import repair # avoid import cycle
1194 from . import repair # avoid import cycle
1195
1195
1196 node = min(allreplaced, key=repo.changelog.rev)
1196 node = min(allreplaced, key=repo.changelog.rev)
1197 repair.backupbundle(
1197 repair.backupbundle(
1198 repo, allreplaced, allreplaced, node, operation
1198 repo, allreplaced, allreplaced, node, operation
1199 )
1199 )
1200 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1200 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1201 else:
1201 else:
1202 from . import repair # avoid import cycle
1202 from . import repair # avoid import cycle
1203
1203
1204 tostrip = list(n for ns in replacements for n in ns)
1204 tostrip = list(n for ns in replacements for n in ns)
1205 if tostrip:
1205 if tostrip:
1206 repair.delayedstrip(
1206 repair.delayedstrip(
1207 repo.ui, repo, tostrip, operation, backup=backup
1207 repo.ui, repo, tostrip, operation, backup=backup
1208 )
1208 )
1209
1209
1210
1210
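# A minimal sketch of calling cleanupnodes (illustrative only, not part of
# the original module; 'old' and 'new' stand for hypothetical changectx
# objects). A dict maps each old node to its successors:
#
#     cleanupnodes(repo, {old.node(): [new.node()]}, b'rebase')
#
# Passing a plain iterable instead marks those nodes as pruned without
# successors, e.g. cleanupnodes(repo, [old.node()], b'prune').

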
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.Abort(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_(b'similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret


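# Illustrative sketch (an assumed caller, not from the original source):
# running addremove over the whole working copy with 50% rename detection.
# matchall and getuipathfn are defined earlier in this module.
#
#     m = matchall(repo)
#     uipathfn = getuipathfn(repo)
#     ret = addremove(repo, m, b'', uipathfn, opts={b'similarity': 50})

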
def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in pycompat.iteritems(walkresults):
        dstate = dirstate[abs]
        if dstate == b'?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != b'r' and not st:
            deleted.append(abs)
        elif dstate == b'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == b'r' and not st:
            removed.append(abs)
        elif dstate == b'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in pycompat.iteritems(renames):
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


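# Illustrative usage sketch (hypothetical caller): both factories return a
# callable so the changeset-centric-copies check happens once, not once per
# revision.
#
#     copiesfn = getcopiesfn(repo)
#     for dst, src in copiesfn(repo[b'tip']):
#         pass  # dst was copied/renamed from src in that changeset

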
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        if repo.dirstate[dst] not in b'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == b'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if repo.dirstate[dst] in b'?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == b'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == b'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != b'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == b'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != b'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or ds[dst] != b'a':
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


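# Illustrative sketch (hypothetical caller; 'newnode' is a placeholder):
# history-rewriting code typically calls movedirstate under the wlock after
# creating the rewritten commit.
#
#     with repo.wlock():
#         movedirstate(repo, repo[newnode])

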
def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: requirements which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None


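# Illustrative sketch (assumed usage): with share-safe enabled the
# requirements split across the two files; otherwise storereq is None and
# everything belongs in .hg/requires.
#
#     wcreq, storereq = filterrequirements(repo.requirements)
#     if storereq is None:
#         pass  # share-safe is off; only .hg/requires is written

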
def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


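# For reference, writerequires emits a sorted list with one requirement per
# line; a plausible (illustrative) .hg/requires might read:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store

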
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise


class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # a function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


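# A sketch of the intended subclassing pattern (the class names here are
# hypothetical, though they mirror how Mercurial uses filecache elsewhere):
# join() resolves the tracked file through the object's vfs, and the decorated
# method is only recomputed when that file changes on disk.
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class somerepo(object):
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)

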
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


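# Illustrative configuration (the source names are hypothetical): an [extdata]
# source is declared in hgrc and queried by name; the result maps local revs
# to freeform byte strings.
#
#     [extdata]
#     ticketmap = shell:cat ticket-map.txt
#     blamedata = https://example.com/blame-data
#
#     data = extdatasource(repo, b'ticketmap')  # {rev: value}

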
class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))


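# Illustrative usage (via ui.makeprogress(), which constructs this class):
# the context-manager form guarantees complete() runs even if the loop raises.
#
#     with ui.makeprogress(b'checking', unit=b'files', total=len(files)) as p:
#         for f in files:
#             p.increment(item=f)

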
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write a key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))


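# Illustrative round trip (the file name and keys are hypothetical):
#
#     f = simplekeyvaluefile(repo.vfs, b'examplestate')
#     f.write({b'command': b'rebase', b'step': b'2'}, firstline=b'1')
#     d = f.read(firstlinenonkeyval=True)
#     # d == {b'__firstline': b'1', b'command': b'rebase', b'step': b'2'}

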
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
        fetch at each revision. If any of the match elements is None, it
        matches all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
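
# Illustrative registration sketch (hypothetical extension code): an extension
# adds a prefetch function via util.hooks.add(); prefetchfiles() above then
# invokes it with (repo, revmatches).
#
#     def _prefetch(repo, revmatches):
#         for rev, match in revmatches:
#             pass  # fetch the files matched at rev
#
#     fileprefetchhooks.add("myextension", _prefetch)
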
1983
1983
1984 # A marker that tells the evolve extension to suppress its own reporting
1984 # A marker that tells the evolve extension to suppress its own reporting
1985 _reportstroubledchangesets = True
1985 _reportstroubledchangesets = True
1986
1986
1987
1987
def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead.
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

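    # Note: the zero-padded category names (b'00-txnreport', b'01-txnreport',
    # ...) matter because the transaction invokes its callbacks in sorted
    # category order, so summaries are emitted in registration order.
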
    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

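        # Example of the resulting status output (hypothetical counts):
        #     2 new obsolescence markers
        #     obsoleted 1 changesets
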
        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally but the term has not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


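# Example (illustrative sketch; hashes and counts are hypothetical): a caller
# performing a pull would wire the summary in roughly as
#
#     with repo.transaction(b'pull') as tr:
#         registersummarycallback(repo, tr, txnname=b'pull')
#         ...  # apply the incoming changegroup
#
# producing status output such as
#
#     added 3 changesets with 5 changes to 4 files (+1 heads)
#     new changesets 26f7c2f3d1e0:6a0a5e9c2f11 (2 drafts)
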
def getinstabilitymessage(delta, instability):
    """Return the message warning about new instabilities, or None.

    Exists as a separate function so that extensions can wrap it to show more
    information, like how to fix the instabilities."""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


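# Example (doctest-style sketch, assuming translations are disabled so
# _() is the identity):
#
#     >>> getinstabilitymessage(2, b'orphan')
#     b'2 new orphan changesets\n'
#
# A zero or negative delta falls through and returns None.
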
def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


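# Example (hypothetical short hashes): for six nodes and the default
# maxnumnodes=4, this yields something like
#     b'1a2b3c4d5e6f 2b3c4d5e6f7a 3c4d5e6f7a8b 4d5e6f7a8b9c and 2 others'
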
def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to the affected branch
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


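# Example of the resulting abort (hypothetical hashes):
#     abort: rejecting multiple heads on branch "default"
#     (2 heads: 1a2b3c4d5e6f 6f5e4d3c2b1a)
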
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


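# Example (illustrative sketch): an extension can intercept the sink through
# extensions.wrapfunction(); ``_mysink`` is hypothetical.
#
#     def _wrapped(orig, sink):
#         return _mysink(orig(sink))
#
#     extensions.wrapfunction(scmutil, 'wrapconvertsink', _wrapped)
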
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate the branch/tags caches until
    # we can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


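# Example (illustrative): direct access is gated on configuration such as
#
#     [experimental]
#     directaccess = yes
#     directaccess.revnums = yes
#
# after which a command like ``hg export <hidden-hash>`` can resolve the
# hidden changeset through the b'visible-hidden' repoview returned above.
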
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                        continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


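# Example (illustrative): given symbols like {b'42', b'1a2b3c'}, revision
# numbers are only honored when directaccess.revnums is enabled, hash
# prefixes are resolved against the unfiltered changelog, and only revisions
# hidden in the current view make it into the returned set.
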
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
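
# Example (illustrative): for a bookmark named ``feature``,
# format_bookmark_revspec() expands to roughly
#
#     ancestors(bookmark("literal:feature"))
#     - ancestors(head() and not bookmark("literal:feature"))
#     - ancestors(bookmark() and not bookmark("literal:feature"))
#
# i.e. the history reachable from the bookmarked head, minus anything also
# reachable from other heads or other bookmarks.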