##// END OF EJS Templates
pytype: convert type comment for inline variable too...
marmoute -
r52181:8b2ea224 default
parent child Browse files
Show More
@@ -1,755 +1,761 b''
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg
8 """command server extension for cHg
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'setenv' command
19 'setenv' command
20 replace os.environ completely
20 replace os.environ completely
21
21
22 'setumask' command (DEPRECATED)
22 'setumask' command (DEPRECATED)
23 'setumask2' command
23 'setumask2' command
24 set umask
24 set umask
25
25
26 'validate' command
26 'validate' command
27 reload the config and check if the server is up to date
27 reload the config and check if the server is up to date
28
28
29 Config
29 Config
30 ------
30 ------
31
31
32 ::
32 ::
33
33
34 [chgserver]
34 [chgserver]
35 # how long (in seconds) should an idle chg server exit
35 # how long (in seconds) should an idle chg server exit
36 idletimeout = 3600
36 idletimeout = 3600
37
37
38 # whether to skip config or env change checks
38 # whether to skip config or env change checks
39 skiphash = False
39 skiphash = False
40 """
40 """
41
41
42
42
43 import inspect
43 import inspect
44 import os
44 import os
45 import re
45 import re
46 import socket
46 import socket
47 import stat
47 import stat
48 import struct
48 import struct
49 import time
49 import time
50
50
51 from typing import (
52 Optional,
53 )
54
51 from .i18n import _
55 from .i18n import _
52 from .node import hex
56 from .node import hex
53
57
54 from . import (
58 from . import (
55 commandserver,
59 commandserver,
56 encoding,
60 encoding,
57 error,
61 error,
58 extensions,
62 extensions,
59 pycompat,
63 pycompat,
60 util,
64 util,
61 )
65 )
62
66
63 from .utils import (
67 from .utils import (
64 hashutil,
68 hashutil,
65 procutil,
69 procutil,
66 stringutil,
70 stringutil,
67 )
71 )
68
72
69
73
def _hashlist(items):
    """Return the sha1 hex digest of the pretty-printed *items* list."""
    digest = hashutil.sha1(stringutil.pprint(items)).digest()
    return hex(digest)
73
77
74
78
# Config sections whose content feeds into confighash; a change in any of
# these means the running server may no longer behave like a fresh one.
_configsections = [
    b'alias',  # affects global state commands.table
    b'diff-tools',  # affects whether gui or not in extdiff's uisetup
    b'eol',  # uses setconfig('eol', ...)
    b'extdiff',  # uisetup will register new commands
    b'extensions',
    b'fastannotate',  # affects annotate command and adds fastannonate cmd
    b'merge-tools',  # affects whether gui or not in extdiff's uisetup
    b'schemes',  # extsetup will update global hg.schemes
]
86
90
# individual (section, item) config values that also feed into confighash
_configsectionitems = [
    (b'commands', b'show.aliasprefix'),  # show.py reads it in extsetup
]
90
94
# Environment variables whose change invalidates confighash.  The anchors
# (\A ... \Z) force a full-name match; re.X lets the alternation be laid
# out one candidate per line.
_envre = re.compile(
    br'''\A(?:
    CHGHG
    |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
    |HG(?:ENCODING|PLAIN).*
    |LANG(?:UAGE)?
    |LC_.*
    |LD_.*
    |PATH
    |PYTHON.*
    |TERM(?:INFO)?
    |TZ
    )\Z''',
    re.X,
)
107
111
108
112
def _confighash(ui):
    """return a quick hash for detecting config/env changes

    confighash is the hash of sensitive config items and environment variables.

    for chgserver, it is designed that once confighash changes, the server is
    not qualified to serve its client and should redirect the client to a new
    server. different from mtimehash, confighash change will not mark the
    server outdated and exit since the user can have different configs at the
    same time.
    """
    items = []
    for section in _configsections:
        items.append(ui.configitems(section))
    for section, name in _configsectionitems:
        items.append(ui.config(section, name))
    sectionhash = _hashlist(items)
    # If $CHGHG is set, the change to $HG should not trigger a new chg server
    ignored = {b'HG'} if b'CHGHG' in encoding.environ else set()
    envitems = sorted(
        (k, v)
        for k, v in encoding.environ.items()
        if _envre.match(k) and k not in ignored
    )
    envhash = _hashlist(envitems)
    return sectionhash[:6] + envhash[:6]
138
142
139
143
def _getmtimepaths(ui):
    """get a list of paths that should be checked to detect change

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - python binary
    """
    modules = [mod for _name, mod in extensions.extensions(ui)]
    try:
        from . import __version__

        modules.append(__version__)
    except ImportError:
        pass
    files = []
    if pycompat.sysexecutable:
        files.append(pycompat.sysexecutable)
    for mod in modules:
        try:
            files.append(pycompat.fsencode(inspect.getabsfile(mod)))
        except TypeError:
            # e.g. built-in modules have no associated source file
            pass
    return sorted(set(files))
164
168
165
169
def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash calls stat on given paths and calculate a hash based on size and
    mtime of each file. mtimehash does not read file content because reading is
    expensive. therefore it's not 100% reliable for detecting content changes.
    it's possible to return different hashes for same file contents.
    it's also possible to return a same hash for different file contents for
    some carefully crafted situation.

    for chgserver, it is designed that once mtimehash changes, the server is
    considered outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because we only know the paths of
    extensions after importing them (there is imp.find_module but that faces
    race conditions). We need to calculate confighash without importing.
    """

    def trystat(path):
        # (mtime, size) pair, or None when stat fails; failures such as
        # ENOENT or EPERM are not fatal here
        try:
            st = os.stat(path)
            return (st[stat.ST_MTIME], st.st_size)
        except OSError:
            return None

    return _hashlist([trystat(p) for p in paths])[:12]
193
197
194
198
class hashstate:
    """a structure storing confighash, mtimehash, paths used for mtimehash"""

    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        """Build a hashstate from *ui*, computing mtimepaths when omitted."""
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        confighash = _confighash(ui)
        mtimehash = _mtimehash(mtimepaths)
        ui.log(
            b'cmdserver',
            b'confighash = %s mtimehash = %s\n',
            confighash,
            mtimehash,
        )
        return hashstate(confighash, mtimehash, mtimepaths)
216
220
217
221
def _newchgui(srcui, csystem, attachio):
    """Return a copy of *srcui* whose system()/pager calls go through chg.

    *csystem* is the channeledsystem used to forward requests to the client;
    *attachio* is the server method run when the pager asks to re-attach.
    """

    class chgui(srcui.__class__):
        def __init__(self, src=None):
            super(chgui, self).__init__(src)
            # inherit the channel from the copied ui when there is one
            self._csystem = getattr(src, '_csystem', csystem) if src else csystem

        def _runsystem(self, cmd, environ, cwd, out):
            # fallback to the original system method if
            # a. the output stream is not stdout (e.g. stderr, cStringIO),
            # b. or stdout is redirected by protectfinout(),
            # because the chg client is not aware of these situations and
            # will behave differently (i.e. write to stdout).
            use_fallback = (
                out is not self.fout
                or not hasattr(self.fout, 'fileno')
                or self.fout.fileno() != procutil.stdout.fileno()
                or self._finoutredirected
            )
            if use_fallback:
                return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
            self.flush()
            return self._csystem(cmd, procutil.shellenviron(environ), cwd)

        def _runpager(self, cmd, env=None):
            self._csystem(
                cmd,
                procutil.shellenviron(env),
                type=b'pager',
                cmdtable={b'attachio': attachio},
            )
            return True

    return chgui(srcui)
253
257
254
258
def _loadnewui(srcui, args, cdebug):
    """Reload a fresh (ui, lui) pair from config files and *args*.

    The new ui inherits srcui's stdio streams, environ and chg channel, plus
    any runtime config values that did not come from files/--config/environ.
    """
    from . import dispatch  # avoid cycle

    newui = srcui.__class__.load()
    for attr in ['fin', 'fout', 'ferr', 'environ']:
        setattr(newui, attr, getattr(srcui, attr))
    if hasattr(srcui, '_csystem'):
        newui._csystem = srcui._csystem

    # command line args
    options = dispatch._earlyparseopts(newui, args)
    dispatch._parseconfig(newui, options[b'config'])

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if b':' in source or source == b'--config' or source.startswith(b'$'):
            # path:line or command line, or environ
            continue
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwd = options[b'cwd']
    cwd = os.path.realpath(cwd) if cwd else None
    rpath = options[b'repository']
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    extensions.populateui(newui)
    commandserver.setuplogging(newui, fp=cdebug)
    if newui is not newlui:
        extensions.populateui(newlui)
        commandserver.setuplogging(newlui, fp=cdebug)

    return (newui, newlui)
289
293
290
294
class channeledsystem:
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    type, '\0',
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    if type == 'system', waits for:

    exitcode length (unsigned int),
    exitcode (int)

    if type == 'pager', repetitively waits for a command name ending with '\n'
    and executes it defined by cmdtable, or exits the loop if the command name
    is empty.
    """

    def __init__(self, in_, out, channel):
        self.in_ = in_
        self.out = out
        self.channel = channel

    def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
        # frame: channel byte + payload length, then NUL-joined fields
        fields = [type, cmd, util.abspath(cwd or b'.')]
        fields.extend(b'%s=%s' % (k, v) for k, v in environ.items())
        payload = b'\0'.join(fields)
        self.out.write(struct.pack(b'>cI', self.channel, len(payload)))
        self.out.write(payload)
        self.out.flush()

        if type == b'system':
            (length,) = struct.unpack(b'>I', self.in_.read(4))
            if length != 4:
                raise error.Abort(_(b'invalid response'))
            (rc,) = struct.unpack(b'>i', self.in_.read(4))
            return rc
        elif type == b'pager':
            while True:
                cmd = self.in_.readline()[:-1]
                if not cmd:
                    break
                if cmdtable and cmd in cmdtable:
                    cmdtable[cmd]()
                else:
                    raise error.Abort(_(b'unexpected command: %s') % cmd)
        else:
            raise error.ProgrammingError(b'invalid S channel type: %s' % type)
343
347
344
348
# stdio channels handed over by the chg client; each entry pairs the server
# channel attribute with the corresponding ui stream attribute and the mode
# the fd is reopened with.  NOTE: entries are str, not bytes.
_iochannels = [
    # server.ch, ui.fp, mode
    ('cin', 'fin', 'rb'),
    ('cout', 'fout', 'wb'),
    ('cerr', 'ferr', 'wb'),
]
351
355
352
356
353 class chgcmdserver(commandserver.server):
357 class chgcmdserver(commandserver.server):
    def __init__(
        self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
    ):
        # wrap ui so that system()/pager requests travel back to the chg
        # client over the 'S' channel
        super(chgcmdserver, self).__init__(
            _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
            repo,
            fin,
            fout,
            prereposetups,
        )
        self.clientsock = sock
        self._ioattached = False
        self._oldios = []  # original (self.ch, ui.fp, fd) before "attachio"
        self.hashstate = hashstate
        self.baseaddress = baseaddress
        if hashstate is not None:
            # only advertise 'validate' when staleness hashes are tracked
            self.capabilities = self.capabilities.copy()
            self.capabilities[b'validate'] = chgcmdserver.validate
372
376
    def cleanup(self):
        """Flush pending output and detach from the client's stdio."""
        super(chgcmdserver, self).cleanup()
        # dispatch._runcatch() does not flush outputs if exception is not
        # handled by dispatch._dispatch()
        self.ui.flush()
        self._restoreio()
        self._ioattached = False
380
384
381 def attachio(self):
385 def attachio(self):
382 """Attach to client's stdio passed via unix domain socket; all
386 """Attach to client's stdio passed via unix domain socket; all
383 channels except cresult will no longer be used
387 channels except cresult will no longer be used
384 """
388 """
385 # tell client to sendmsg() with 1-byte payload, which makes it
389 # tell client to sendmsg() with 1-byte payload, which makes it
386 # distinctive from "attachio\n" command consumed by client.read()
390 # distinctive from "attachio\n" command consumed by client.read()
387 self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
391 self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
388
392
389 data, ancdata, msg_flags, address = self.clientsock.recvmsg(1, 256)
393 data, ancdata, msg_flags, address = self.clientsock.recvmsg(1, 256)
390 assert len(ancdata) == 1
394 assert len(ancdata) == 1
391 cmsg_level, cmsg_type, cmsg_data = ancdata[0]
395 cmsg_level, cmsg_type, cmsg_data = ancdata[0]
392 assert cmsg_level == socket.SOL_SOCKET
396 assert cmsg_level == socket.SOL_SOCKET
393 assert cmsg_type == socket.SCM_RIGHTS
397 assert cmsg_type == socket.SCM_RIGHTS
394 # memoryview.cast() was added in typeshed 61600d68772a, but pytype
398 # memoryview.cast() was added in typeshed 61600d68772a, but pytype
395 # still complains
399 # still complains
396 # pytype: disable=attribute-error
400 # pytype: disable=attribute-error
397 clientfds = memoryview(cmsg_data).cast('i').tolist()
401 clientfds = memoryview(cmsg_data).cast('i').tolist()
398 # pytype: enable=attribute-error
402 # pytype: enable=attribute-error
399 self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)
403 self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)
400
404
401 ui = self.ui
405 ui = self.ui
402 ui.flush()
406 ui.flush()
403 self._saveio()
407 self._saveio()
404 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
408 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
405 assert fd > 0
409 assert fd > 0
406 fp = getattr(ui, fn)
410 fp = getattr(ui, fn)
407 os.dup2(fd, fp.fileno())
411 os.dup2(fd, fp.fileno())
408 os.close(fd)
412 os.close(fd)
409 if self._ioattached:
413 if self._ioattached:
410 continue
414 continue
411 # reset buffering mode when client is first attached. as we want
415 # reset buffering mode when client is first attached. as we want
412 # to see output immediately on pager, the mode stays unchanged
416 # to see output immediately on pager, the mode stays unchanged
413 # when client re-attached. ferr is unchanged because it should
417 # when client re-attached. ferr is unchanged because it should
414 # be unbuffered no matter if it is a tty or not.
418 # be unbuffered no matter if it is a tty or not.
415 if fn == b'ferr':
419 if fn == b'ferr':
416 newfp = fp
420 newfp = fp
417 else:
421 else:
418 # On Python 3, the standard library doesn't offer line-buffered
422 # On Python 3, the standard library doesn't offer line-buffered
419 # binary streams, so wrap/unwrap it.
423 # binary streams, so wrap/unwrap it.
420 if fp.isatty():
424 if fp.isatty():
421 newfp = procutil.make_line_buffered(fp)
425 newfp = procutil.make_line_buffered(fp)
422 else:
426 else:
423 newfp = procutil.unwrap_line_buffered(fp)
427 newfp = procutil.unwrap_line_buffered(fp)
424 if newfp is not fp:
428 if newfp is not fp:
425 setattr(ui, fn, newfp)
429 setattr(ui, fn, newfp)
426 setattr(self, cn, newfp)
430 setattr(self, cn, newfp)
427
431
428 self._ioattached = True
432 self._ioattached = True
429 self.cresult.write(struct.pack(b'>i', len(clientfds)))
433 self.cresult.write(struct.pack(b'>i', len(clientfds)))
430
434
431 def _saveio(self):
435 def _saveio(self):
432 if self._oldios:
436 if self._oldios:
433 return
437 return
434 ui = self.ui
438 ui = self.ui
435 for cn, fn, _mode in _iochannels:
439 for cn, fn, _mode in _iochannels:
436 ch = getattr(self, cn)
440 ch = getattr(self, cn)
437 fp = getattr(ui, fn)
441 fp = getattr(ui, fn)
438 fd = os.dup(fp.fileno())
442 fd = os.dup(fp.fileno())
439 self._oldios.append((ch, fp, fd))
443 self._oldios.append((ch, fp, fd))
440
444
    def _restoreio(self):
        """Put back the stdio fds duplicated by _saveio() and drop the dups."""
        if not self._oldios:
            return
        nullfd = os.open(os.devnull, os.O_WRONLY)
        ui = self.ui
        for (ch, fp, fd), (cn, fn, mode) in zip(self._oldios, _iochannels):
            try:
                if 'w' in mode:
                    # Discard buffered data which couldn't be flushed because
                    # of EPIPE. The data should belong to the current session
                    # and should never persist.
                    os.dup2(nullfd, fp.fileno())
                    fp.flush()
                os.dup2(fd, fp.fileno())
                os.close(fd)
            except OSError as err:
                # According to issue6330, running chg on heavy loaded systems
                # can lead to EBUSY. [man dup2] indicates that, on Linux,
                # EBUSY comes from a race condition between open() and dup2().
                # However it's not clear why open() race occurred for
                # newfd=stdin/out/err.
                self.ui.log(
                    b'chgserver',
                    b'got %s while duplicating %s\n',
                    stringutil.forcebytestr(err),
                    fn,
                )
            # restore the saved channel/stream objects regardless of errors
            setattr(self, cn, ch)
            setattr(ui, fn, fp)
        os.close(nullfd)
        del self._oldios[:]
472
476
    def validate(self):
        """Reload the config and check if the server is up to date

        Read a list of '\0' separated arguments.
        Write a non-empty list of '\0' separated instruction strings or '\0'
        if the list is empty.
        An instruction string could be either:
        - "unlink $path", the client should unlink the path to stop the
          outdated server.
        - "redirect $path", the client should attempt to connect to $path
          first. If it does not work, start a new server. It implies
          "reconnect".
        - "exit $n", the client should exit directly with code n.
          This may happen if we cannot parse the config.
        - "reconnect", the client should close the connection and
          reconnect.
        If neither "reconnect" nor "redirect" is included in the instruction
        list, the client can continue with this server after completing all
        the instructions.
        """
        args = self._readlist()
        errorraised = False
        detailed_exit_code = 255
        try:
            self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
        except error.RepoError as inst:
            # RepoError can be raised while trying to read shared source
            # configuration
            self.ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
            if inst.hint:
                self.ui.error(_(b"(%s)\n") % inst.hint)
            errorraised = True
        except error.Error as inst:
            if inst.detailed_exit_code is not None:
                detailed_exit_code = inst.detailed_exit_code
            self.ui.error(inst.format())
            errorraised = True

        if errorraised:
            # config could not be loaded: tell the client to exit with the
            # appropriate (possibly detailed) error code
            self.ui.flush()
            exit_code = 255
            if self.ui.configbool(b'ui', b'detailed-exit-code'):
                exit_code = detailed_exit_code
            self.cresult.write(b'exit %d' % exit_code)
            return
        newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
        insts = []
        if newhash.mtimehash != self.hashstate.mtimehash:
            addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
            insts.append(b'unlink %s' % addr)
            # mtimehash is empty if one or more extensions fail to load.
            # to be compatible with hg, still serve the client this time.
            if self.hashstate.mtimehash:
                insts.append(b'reconnect')
        if newhash.confighash != self.hashstate.confighash:
            addr = _hashaddress(self.baseaddress, newhash.confighash)
            insts.append(b'redirect %s' % addr)
        self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
        self.cresult.write(b'\0'.join(insts) or b'\0')
532
536
533 def chdir(self):
537 def chdir(self):
534 """Change current directory
538 """Change current directory
535
539
536 Note that the behavior of --cwd option is bit different from this.
540 Note that the behavior of --cwd option is bit different from this.
537 It does not affect --config parameter.
541 It does not affect --config parameter.
538 """
542 """
539 path = self._readstr()
543 path = self._readstr()
540 if not path:
544 if not path:
541 return
545 return
542 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
546 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
543 os.chdir(path)
547 os.chdir(path)
544
548
545 def setumask(self):
549 def setumask(self):
546 """Change umask (DEPRECATED)"""
550 """Change umask (DEPRECATED)"""
547 # BUG: this does not follow the message frame structure, but kept for
551 # BUG: this does not follow the message frame structure, but kept for
548 # backward compatibility with old chg clients for some time
552 # backward compatibility with old chg clients for some time
549 self._setumask(self._read(4))
553 self._setumask(self._read(4))
550
554
551 def setumask2(self):
555 def setumask2(self):
552 """Change umask"""
556 """Change umask"""
553 data = self._readstr()
557 data = self._readstr()
554 if len(data) != 4:
558 if len(data) != 4:
555 raise ValueError(b'invalid mask length in setumask2 request')
559 raise ValueError(b'invalid mask length in setumask2 request')
556 self._setumask(data)
560 self._setumask(data)
557
561
558 def _setumask(self, data):
562 def _setumask(self, data):
559 mask = struct.unpack(b'>I', data)[0]
563 mask = struct.unpack(b'>I', data)[0]
560 self.ui.log(b'chgserver', b'setumask %r\n', mask)
564 self.ui.log(b'chgserver', b'setumask %r\n', mask)
561 util.setumask(mask)
565 util.setumask(mask)
562
566
563 def runcommand(self):
567 def runcommand(self):
564 # pager may be attached within the runcommand session, which should
568 # pager may be attached within the runcommand session, which should
565 # be detached at the end of the session. otherwise the pager wouldn't
569 # be detached at the end of the session. otherwise the pager wouldn't
566 # receive EOF.
570 # receive EOF.
567 globaloldios = self._oldios
571 globaloldios = self._oldios
568 self._oldios = []
572 self._oldios = []
569 try:
573 try:
570 return super(chgcmdserver, self).runcommand()
574 return super(chgcmdserver, self).runcommand()
571 finally:
575 finally:
572 self._restoreio()
576 self._restoreio()
573 self._oldios = globaloldios
577 self._oldios = globaloldios
574
578
575 def setenv(self):
579 def setenv(self):
576 """Clear and update os.environ
580 """Clear and update os.environ
577
581
578 Note that not all variables can make an effect on the running process.
582 Note that not all variables can make an effect on the running process.
579 """
583 """
580 l = self._readlist()
584 l = self._readlist()
581 try:
585 try:
582 newenv = dict(s.split(b'=', 1) for s in l)
586 newenv = dict(s.split(b'=', 1) for s in l)
583 except ValueError:
587 except ValueError:
584 raise ValueError(b'unexpected value in setenv request')
588 raise ValueError(b'unexpected value in setenv request')
585 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
589 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
586
590
587 encoding.environ.clear()
591 encoding.environ.clear()
588 encoding.environ.update(newenv)
592 encoding.environ.update(newenv)
589
593
590 capabilities = commandserver.server.capabilities.copy()
594 capabilities = commandserver.server.capabilities.copy()
591 capabilities.update(
595 capabilities.update(
592 {
596 {
593 b'attachio': attachio,
597 b'attachio': attachio,
594 b'chdir': chdir,
598 b'chdir': chdir,
595 b'runcommand': runcommand,
599 b'runcommand': runcommand,
596 b'setenv': setenv,
600 b'setenv': setenv,
597 b'setumask': setumask,
601 b'setumask': setumask,
598 b'setumask2': setumask2,
602 b'setumask2': setumask2,
599 }
603 }
600 )
604 )
601
605
602 if hasattr(procutil, 'setprocname'):
606 if hasattr(procutil, 'setprocname'):
603
607
604 def setprocname(self):
608 def setprocname(self):
605 """Change process title"""
609 """Change process title"""
606 name = self._readstr()
610 name = self._readstr()
607 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
611 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
608 procutil.setprocname(name)
612 procutil.setprocname(name)
609
613
610 capabilities[b'setprocname'] = setprocname
614 capabilities[b'setprocname'] = setprocname
611
615
612
616
613 def _tempaddress(address):
617 def _tempaddress(address):
614 return b'%s.%d.tmp' % (address, os.getpid())
618 return b'%s.%d.tmp' % (address, os.getpid())
615
619
616
620
617 def _hashaddress(address, hashstr):
621 def _hashaddress(address, hashstr):
618 # if the basename of address contains '.', use only the left part. this
622 # if the basename of address contains '.', use only the left part. this
619 # makes it possible for the client to pass 'server.tmp$PID' and follow by
623 # makes it possible for the client to pass 'server.tmp$PID' and follow by
620 # an atomic rename to avoid locking when spawning new servers.
624 # an atomic rename to avoid locking when spawning new servers.
621 dirname, basename = os.path.split(address)
625 dirname, basename = os.path.split(address)
622 basename = basename.split(b'.', 1)[0]
626 basename = basename.split(b'.', 1)[0]
623 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
627 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
624
628
625
629
626 class chgunixservicehandler:
630 class chgunixservicehandler:
627 """Set of operations for chg services"""
631 """Set of operations for chg services"""
628
632
629 pollinterval = 1 # [sec]
633 pollinterval = 1 # [sec]
630
634
635 _hashstate: Optional[hashstate]
636 _baseaddress: Optional[bytes]
637 _realaddress: Optional[bytes]
638
631 def __init__(self, ui):
639 def __init__(self, ui):
632 self.ui = ui
640 self.ui = ui
633
641
634 # TODO: use PEP 526 syntax (`_hashstate: hashstate` at the class level)
642 self._hashstate = None
635 # when 3.5 support is dropped.
643 self._baseaddress = None
636 self._hashstate = None # type: hashstate
644 self._realaddress = None
637 self._baseaddress = None # type: bytes
638 self._realaddress = None # type: bytes
639
645
640 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
646 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
641 self._lastactive = time.time()
647 self._lastactive = time.time()
642
648
643 def bindsocket(self, sock, address):
649 def bindsocket(self, sock, address):
644 self._inithashstate(address)
650 self._inithashstate(address)
645 self._checkextensions()
651 self._checkextensions()
646 self._bind(sock)
652 self._bind(sock)
647 self._createsymlink()
653 self._createsymlink()
648 # no "listening at" message should be printed to simulate hg behavior
654 # no "listening at" message should be printed to simulate hg behavior
649
655
650 def _inithashstate(self, address):
656 def _inithashstate(self, address):
651 self._baseaddress = address
657 self._baseaddress = address
652 if self.ui.configbool(b'chgserver', b'skiphash'):
658 if self.ui.configbool(b'chgserver', b'skiphash'):
653 self._hashstate = None
659 self._hashstate = None
654 self._realaddress = address
660 self._realaddress = address
655 return
661 return
656 self._hashstate = hashstate.fromui(self.ui)
662 self._hashstate = hashstate.fromui(self.ui)
657 self._realaddress = _hashaddress(address, self._hashstate.confighash)
663 self._realaddress = _hashaddress(address, self._hashstate.confighash)
658
664
659 def _checkextensions(self):
665 def _checkextensions(self):
660 if not self._hashstate:
666 if not self._hashstate:
661 return
667 return
662 if extensions.notloaded():
668 if extensions.notloaded():
663 # one or more extensions failed to load. mtimehash becomes
669 # one or more extensions failed to load. mtimehash becomes
664 # meaningless because we do not know the paths of those extensions.
670 # meaningless because we do not know the paths of those extensions.
665 # set mtimehash to an illegal hash value to invalidate the server.
671 # set mtimehash to an illegal hash value to invalidate the server.
666 self._hashstate.mtimehash = b''
672 self._hashstate.mtimehash = b''
667
673
668 def _bind(self, sock):
674 def _bind(self, sock):
669 # use a unique temp address so we can stat the file and do ownership
675 # use a unique temp address so we can stat the file and do ownership
670 # check later
676 # check later
671 tempaddress = _tempaddress(self._realaddress)
677 tempaddress = _tempaddress(self._realaddress)
672 util.bindunixsocket(sock, tempaddress)
678 util.bindunixsocket(sock, tempaddress)
673 self._socketstat = os.stat(tempaddress)
679 self._socketstat = os.stat(tempaddress)
674 sock.listen(socket.SOMAXCONN)
680 sock.listen(socket.SOMAXCONN)
675 # rename will replace the old socket file if exists atomically. the
681 # rename will replace the old socket file if exists atomically. the
676 # old server will detect ownership change and exit.
682 # old server will detect ownership change and exit.
677 util.rename(tempaddress, self._realaddress)
683 util.rename(tempaddress, self._realaddress)
678
684
679 def _createsymlink(self):
685 def _createsymlink(self):
680 if self._baseaddress == self._realaddress:
686 if self._baseaddress == self._realaddress:
681 return
687 return
682 tempaddress = _tempaddress(self._baseaddress)
688 tempaddress = _tempaddress(self._baseaddress)
683 os.symlink(os.path.basename(self._realaddress), tempaddress)
689 os.symlink(os.path.basename(self._realaddress), tempaddress)
684 util.rename(tempaddress, self._baseaddress)
690 util.rename(tempaddress, self._baseaddress)
685
691
686 def _issocketowner(self):
692 def _issocketowner(self):
687 try:
693 try:
688 st = os.stat(self._realaddress)
694 st = os.stat(self._realaddress)
689 return (
695 return (
690 st.st_ino == self._socketstat.st_ino
696 st.st_ino == self._socketstat.st_ino
691 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
697 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
692 )
698 )
693 except OSError:
699 except OSError:
694 return False
700 return False
695
701
696 def unlinksocket(self, address):
702 def unlinksocket(self, address):
697 if not self._issocketowner():
703 if not self._issocketowner():
698 return
704 return
699 # it is possible to have a race condition here that we may
705 # it is possible to have a race condition here that we may
700 # remove another server's socket file. but that's okay
706 # remove another server's socket file. but that's okay
701 # since that server will detect and exit automatically and
707 # since that server will detect and exit automatically and
702 # the client will start a new server on demand.
708 # the client will start a new server on demand.
703 util.tryunlink(self._realaddress)
709 util.tryunlink(self._realaddress)
704
710
705 def shouldexit(self):
711 def shouldexit(self):
706 if not self._issocketowner():
712 if not self._issocketowner():
707 self.ui.log(
713 self.ui.log(
708 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
714 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
709 )
715 )
710 return True
716 return True
711 if time.time() - self._lastactive > self._idletimeout:
717 if time.time() - self._lastactive > self._idletimeout:
712 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
718 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
713 return True
719 return True
714 return False
720 return False
715
721
716 def newconnection(self):
722 def newconnection(self):
717 self._lastactive = time.time()
723 self._lastactive = time.time()
718
724
719 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
725 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
720 return chgcmdserver(
726 return chgcmdserver(
721 self.ui,
727 self.ui,
722 repo,
728 repo,
723 fin,
729 fin,
724 fout,
730 fout,
725 conn,
731 conn,
726 prereposetups,
732 prereposetups,
727 self._hashstate,
733 self._hashstate,
728 self._baseaddress,
734 self._baseaddress,
729 )
735 )
730
736
731
737
732 def chgunixservice(ui, repo, opts):
738 def chgunixservice(ui, repo, opts):
733 # CHGINTERNALMARK is set by chg client. It is an indication of things are
739 # CHGINTERNALMARK is set by chg client. It is an indication of things are
734 # started by chg so other code can do things accordingly, like disabling
740 # started by chg so other code can do things accordingly, like disabling
735 # demandimport or detecting chg client started by chg client. When executed
741 # demandimport or detecting chg client started by chg client. When executed
736 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
742 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
737 # environ cleaner.
743 # environ cleaner.
738 if b'CHGINTERNALMARK' in encoding.environ:
744 if b'CHGINTERNALMARK' in encoding.environ:
739 del encoding.environ[b'CHGINTERNALMARK']
745 del encoding.environ[b'CHGINTERNALMARK']
740 # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
746 # Python3.7+ "coerces" the LC_CTYPE environment variable to a UTF-8 one if
741 # it thinks the current value is "C". This breaks the hash computation and
747 # it thinks the current value is "C". This breaks the hash computation and
742 # causes chg to restart loop.
748 # causes chg to restart loop.
743 if b'CHGORIG_LC_CTYPE' in encoding.environ:
749 if b'CHGORIG_LC_CTYPE' in encoding.environ:
744 encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
750 encoding.environ[b'LC_CTYPE'] = encoding.environ[b'CHGORIG_LC_CTYPE']
745 del encoding.environ[b'CHGORIG_LC_CTYPE']
751 del encoding.environ[b'CHGORIG_LC_CTYPE']
746 elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
752 elif b'CHG_CLEAR_LC_CTYPE' in encoding.environ:
747 if b'LC_CTYPE' in encoding.environ:
753 if b'LC_CTYPE' in encoding.environ:
748 del encoding.environ[b'LC_CTYPE']
754 del encoding.environ[b'LC_CTYPE']
749 del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
755 del encoding.environ[b'CHG_CLEAR_LC_CTYPE']
750
756
751 if repo:
757 if repo:
752 # one chgserver can serve multiple repos. drop repo information
758 # one chgserver can serve multiple repos. drop repo information
753 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
759 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
754 h = chgunixservicehandler(ui)
760 h = chgunixservicehandler(ui)
755 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
761 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
@@ -1,704 +1,704 b''
1 # error.py - Mercurial exceptions
1 # error.py - Mercurial exceptions
2 #
2 #
3 # Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Mercurial exceptions.
8 """Mercurial exceptions.
9
9
10 This allows us to catch exceptions at higher levels without forcing
10 This allows us to catch exceptions at higher levels without forcing
11 imports.
11 imports.
12 """
12 """
13
13
14
14
15 import difflib
15 import difflib
16
16
17 from typing import (
17 from typing import (
18 Any,
18 Any,
19 AnyStr,
19 AnyStr,
20 Iterable,
20 Iterable,
21 List,
21 List,
22 Optional,
22 Optional,
23 Sequence,
23 Sequence,
24 Union,
24 Union,
25 )
25 )
26
26
27 # Do not import anything but pycompat here, please
27 # Do not import anything but pycompat here, please
28 from . import pycompat
28 from . import pycompat
29
29
30
30
31 # keeps pyflakes happy
31 # keeps pyflakes happy
32 assert [
32 assert [
33 Any,
33 Any,
34 AnyStr,
34 AnyStr,
35 Iterable,
35 Iterable,
36 List,
36 List,
37 Optional,
37 Optional,
38 Sequence,
38 Sequence,
39 Union,
39 Union,
40 ]
40 ]
41
41
42
42
43 def _tobytes(exc) -> bytes:
43 def _tobytes(exc) -> bytes:
44 """Byte-stringify exception in the same way as BaseException_str()"""
44 """Byte-stringify exception in the same way as BaseException_str()"""
45 if not exc.args:
45 if not exc.args:
46 return b''
46 return b''
47 if len(exc.args) == 1:
47 if len(exc.args) == 1:
48 return pycompat.bytestr(exc.args[0])
48 return pycompat.bytestr(exc.args[0])
49 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
49 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
50
50
51
51
52 class Hint:
52 class Hint:
53 """Mix-in to provide a hint of an error
53 """Mix-in to provide a hint of an error
54
54
55 This should come first in the inheritance list to consume a hint and
55 This should come first in the inheritance list to consume a hint and
56 pass remaining arguments to the exception class.
56 pass remaining arguments to the exception class.
57 """
57 """
58
58
59 def __init__(self, *args, **kw):
59 def __init__(self, *args, **kw):
60 self.hint = kw.pop('hint', None) # type: Optional[bytes]
60 self.hint: Optional[bytes] = kw.pop('hint', None)
61 super(Hint, self).__init__(*args, **kw)
61 super(Hint, self).__init__(*args, **kw)
62
62
63
63
64 class Error(Hint, Exception):
64 class Error(Hint, Exception):
65 """Base class for Mercurial errors."""
65 """Base class for Mercurial errors."""
66
66
67 coarse_exit_code = None
67 coarse_exit_code = None
68 detailed_exit_code = None
68 detailed_exit_code = None
69
69
70 def __init__(self, message: bytes, hint: Optional[bytes] = None) -> None:
70 def __init__(self, message: bytes, hint: Optional[bytes] = None) -> None:
71 self.message = message
71 self.message = message
72 self.hint = hint
72 self.hint = hint
73 # Pass the message into the Exception constructor to help extensions
73 # Pass the message into the Exception constructor to help extensions
74 # that look for exc.args[0].
74 # that look for exc.args[0].
75 Exception.__init__(self, message)
75 Exception.__init__(self, message)
76
76
77 def __bytes__(self):
77 def __bytes__(self):
78 return self.message
78 return self.message
79
79
80 def __str__(self) -> str:
80 def __str__(self) -> str:
81 # the output would be unreadable if the message was translated,
81 # the output would be unreadable if the message was translated,
82 # but do not replace it with encoding.strfromlocal(), which
82 # but do not replace it with encoding.strfromlocal(), which
83 # may raise another exception.
83 # may raise another exception.
84 return pycompat.sysstr(self.__bytes__())
84 return pycompat.sysstr(self.__bytes__())
85
85
86 def format(self) -> bytes:
86 def format(self) -> bytes:
87 from .i18n import _
87 from .i18n import _
88
88
89 message = _(b"abort: %s\n") % self.message
89 message = _(b"abort: %s\n") % self.message
90 if self.hint:
90 if self.hint:
91 message += _(b"(%s)\n") % self.hint
91 message += _(b"(%s)\n") % self.hint
92 return message
92 return message
93
93
94
94
95 class Abort(Error):
95 class Abort(Error):
96 """Raised if a command needs to print an error and exit."""
96 """Raised if a command needs to print an error and exit."""
97
97
98
98
99 class StorageError(Error):
99 class StorageError(Error):
100 """Raised when an error occurs in a storage layer.
100 """Raised when an error occurs in a storage layer.
101
101
102 Usually subclassed by a storage-specific exception.
102 Usually subclassed by a storage-specific exception.
103 """
103 """
104
104
105 detailed_exit_code = 50
105 detailed_exit_code = 50
106
106
107
107
108 class RevlogError(StorageError):
108 class RevlogError(StorageError):
109 pass
109 pass
110
110
111
111
112 class SidedataHashError(RevlogError):
112 class SidedataHashError(RevlogError):
113 def __init__(self, key: int, expected: bytes, got: bytes) -> None:
113 def __init__(self, key: int, expected: bytes, got: bytes) -> None:
114 self.hint = None
114 self.hint = None
115 self.sidedatakey = key
115 self.sidedatakey = key
116 self.expecteddigest = expected
116 self.expecteddigest = expected
117 self.actualdigest = got
117 self.actualdigest = got
118
118
119
119
120 class FilteredIndexError(IndexError):
120 class FilteredIndexError(IndexError):
121 __bytes__ = _tobytes
121 __bytes__ = _tobytes
122
122
123
123
124 class LookupError(RevlogError, KeyError):
124 class LookupError(RevlogError, KeyError):
125 def __init__(self, name: bytes, index: bytes, message: bytes) -> None:
125 def __init__(self, name: bytes, index: bytes, message: bytes) -> None:
126 self.name = name
126 self.name = name
127 self.index = index
127 self.index = index
128 # this can't be called 'message' because at least some installs of
128 # this can't be called 'message' because at least some installs of
129 # Python 2.6+ complain about the 'message' property being deprecated
129 # Python 2.6+ complain about the 'message' property being deprecated
130 self.lookupmessage = message
130 self.lookupmessage = message
131 if isinstance(name, bytes) and len(name) == 20:
131 if isinstance(name, bytes) and len(name) == 20:
132 from .node import hex
132 from .node import hex
133
133
134 name = hex(name)
134 name = hex(name)
135 # if name is a binary node, it can be None
135 # if name is a binary node, it can be None
136 RevlogError.__init__(
136 RevlogError.__init__(
137 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
137 self, b'%s@%s: %s' % (index, pycompat.bytestr(name), message)
138 )
138 )
139
139
140 def __bytes__(self):
140 def __bytes__(self):
141 return RevlogError.__bytes__(self)
141 return RevlogError.__bytes__(self)
142
142
143 def __str__(self):
143 def __str__(self):
144 return RevlogError.__str__(self)
144 return RevlogError.__str__(self)
145
145
146
146
147 class AmbiguousPrefixLookupError(LookupError):
147 class AmbiguousPrefixLookupError(LookupError):
148 pass
148 pass
149
149
150
150
151 class FilteredLookupError(LookupError):
151 class FilteredLookupError(LookupError):
152 pass
152 pass
153
153
154
154
155 class ManifestLookupError(LookupError):
155 class ManifestLookupError(LookupError):
156 pass
156 pass
157
157
158
158
159 class CommandError(Exception):
159 class CommandError(Exception):
160 """Exception raised on errors in parsing the command line."""
160 """Exception raised on errors in parsing the command line."""
161
161
162 def __init__(self, command: Optional[bytes], message: bytes) -> None:
162 def __init__(self, command: Optional[bytes], message: bytes) -> None:
163 self.command = command
163 self.command = command
164 self.message = message
164 self.message = message
165 super(CommandError, self).__init__()
165 super(CommandError, self).__init__()
166
166
167 __bytes__ = _tobytes
167 __bytes__ = _tobytes
168
168
169
169
170 class UnknownCommand(Exception):
170 class UnknownCommand(Exception):
171 """Exception raised if command is not in the command table."""
171 """Exception raised if command is not in the command table."""
172
172
173 def __init__(
173 def __init__(
174 self,
174 self,
175 command: bytes,
175 command: bytes,
176 all_commands: Optional[List[bytes]] = None,
176 all_commands: Optional[List[bytes]] = None,
177 ) -> None:
177 ) -> None:
178 self.command = command
178 self.command = command
179 self.all_commands = all_commands
179 self.all_commands = all_commands
180 super(UnknownCommand, self).__init__()
180 super(UnknownCommand, self).__init__()
181
181
182 __bytes__ = _tobytes
182 __bytes__ = _tobytes
183
183
184
184
185 class AmbiguousCommand(Exception):
185 class AmbiguousCommand(Exception):
186 """Exception raised if command shortcut matches more than one command."""
186 """Exception raised if command shortcut matches more than one command."""
187
187
188 def __init__(self, prefix: bytes, matches: List[bytes]) -> None:
188 def __init__(self, prefix: bytes, matches: List[bytes]) -> None:
189 self.prefix = prefix
189 self.prefix = prefix
190 self.matches = matches
190 self.matches = matches
191 super(AmbiguousCommand, self).__init__()
191 super(AmbiguousCommand, self).__init__()
192
192
193 __bytes__ = _tobytes
193 __bytes__ = _tobytes
194
194
195
195
196 class WorkerError(Exception):
196 class WorkerError(Exception):
197 """Exception raised when a worker process dies."""
197 """Exception raised when a worker process dies."""
198
198
199 def __init__(self, status_code: int) -> None:
199 def __init__(self, status_code: int) -> None:
200 self.status_code = status_code
200 self.status_code = status_code
201 # Pass status code to superclass just so it becomes part of __bytes__
201 # Pass status code to superclass just so it becomes part of __bytes__
202 super(WorkerError, self).__init__(status_code)
202 super(WorkerError, self).__init__(status_code)
203
203
204 __bytes__ = _tobytes
204 __bytes__ = _tobytes
205
205
206
206
207 class InterventionRequired(Abort):
207 class InterventionRequired(Abort):
208 """Exception raised when a command requires human intervention."""
208 """Exception raised when a command requires human intervention."""
209
209
210 coarse_exit_code = 1
210 coarse_exit_code = 1
211 detailed_exit_code = 240
211 detailed_exit_code = 240
212
212
213 def format(self) -> bytes:
213 def format(self) -> bytes:
214 from .i18n import _
214 from .i18n import _
215
215
216 message = _(b"%s\n") % self.message
216 message = _(b"%s\n") % self.message
217 if self.hint:
217 if self.hint:
218 message += _(b"(%s)\n") % self.hint
218 message += _(b"(%s)\n") % self.hint
219 return message
219 return message
220
220
221
221
222 class ConflictResolutionRequired(InterventionRequired):
222 class ConflictResolutionRequired(InterventionRequired):
223 """Exception raised when a continuable command required merge conflict resolution."""
223 """Exception raised when a continuable command required merge conflict resolution."""
224
224
225 def __init__(self, opname: bytes) -> None:
225 def __init__(self, opname: bytes) -> None:
226 from .i18n import _
226 from .i18n import _
227
227
228 self.opname = opname
228 self.opname = opname
229 InterventionRequired.__init__(
229 InterventionRequired.__init__(
230 self,
230 self,
231 _(
231 _(
232 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
232 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
233 )
233 )
234 % opname,
234 % opname,
235 )
235 )
236
236
237
237
238 class InputError(Abort):
238 class InputError(Abort):
239 """Indicates that the user made an error in their input.
239 """Indicates that the user made an error in their input.
240
240
241 Examples: Invalid command, invalid flags, invalid revision.
241 Examples: Invalid command, invalid flags, invalid revision.
242 """
242 """
243
243
244 detailed_exit_code = 10
244 detailed_exit_code = 10
245
245
246
246
247 class StateError(Abort):
247 class StateError(Abort):
248 """Indicates that the operation might work if retried in a different state.
248 """Indicates that the operation might work if retried in a different state.
249
249
250 Examples: Unresolved merge conflicts, unfinished operations.
250 Examples: Unresolved merge conflicts, unfinished operations.
251 """
251 """
252
252
253 detailed_exit_code = 20
253 detailed_exit_code = 20
254
254
255
255
256 class CanceledError(Abort):
256 class CanceledError(Abort):
257 """Indicates that the user canceled the operation.
257 """Indicates that the user canceled the operation.
258
258
259 Examples: Close commit editor with error status, quit chistedit.
259 Examples: Close commit editor with error status, quit chistedit.
260 """
260 """
261
261
262 detailed_exit_code = 250
262 detailed_exit_code = 250
263
263
264
264
265 class SecurityError(Abort):
265 class SecurityError(Abort):
266 """Indicates that some aspect of security failed.
266 """Indicates that some aspect of security failed.
267
267
268 Examples: Bad server credentials, expired local credentials for network
268 Examples: Bad server credentials, expired local credentials for network
269 filesystem, mismatched GPG signature, DoS protection.
269 filesystem, mismatched GPG signature, DoS protection.
270 """
270 """
271
271
272 detailed_exit_code = 150
272 detailed_exit_code = 150
273
273
274
274
275 class HookLoadError(Abort):
275 class HookLoadError(Abort):
276 """raised when loading a hook fails, aborting an operation
276 """raised when loading a hook fails, aborting an operation
277
277
278 Exists to allow more specialized catching."""
278 Exists to allow more specialized catching."""
279
279
280
280
281 class HookAbort(Abort):
281 class HookAbort(Abort):
282 """raised when a validation hook fails, aborting an operation
282 """raised when a validation hook fails, aborting an operation
283
283
284 Exists to allow more specialized catching."""
284 Exists to allow more specialized catching."""
285
285
286 detailed_exit_code = 40
286 detailed_exit_code = 40
287
287
288
288
289 class ConfigError(Abort):
289 class ConfigError(Abort):
290 """Exception raised when parsing config files"""
290 """Exception raised when parsing config files"""
291
291
292 detailed_exit_code = 30
292 detailed_exit_code = 30
293
293
294 def __init__(
294 def __init__(
295 self,
295 self,
296 message: bytes,
296 message: bytes,
297 location: Optional[bytes] = None,
297 location: Optional[bytes] = None,
298 hint: Optional[bytes] = None,
298 hint: Optional[bytes] = None,
299 ) -> None:
299 ) -> None:
300 super(ConfigError, self).__init__(message, hint=hint)
300 super(ConfigError, self).__init__(message, hint=hint)
301 self.location = location
301 self.location = location
302
302
303 def format(self) -> bytes:
303 def format(self) -> bytes:
304 from .i18n import _
304 from .i18n import _
305
305
306 if self.location is not None:
306 if self.location is not None:
307 message = _(b"config error at %s: %s\n") % (
307 message = _(b"config error at %s: %s\n") % (
308 pycompat.bytestr(self.location),
308 pycompat.bytestr(self.location),
309 self.message,
309 self.message,
310 )
310 )
311 else:
311 else:
312 message = _(b"config error: %s\n") % self.message
312 message = _(b"config error: %s\n") % self.message
313 if self.hint:
313 if self.hint:
314 message += _(b"(%s)\n") % self.hint
314 message += _(b"(%s)\n") % self.hint
315 return message
315 return message
316
316
317
317
318 class UpdateAbort(Abort):
318 class UpdateAbort(Abort):
319 """Raised when an update is aborted for destination issue"""
319 """Raised when an update is aborted for destination issue"""
320
320
321
321
322 class MergeDestAbort(Abort):
322 class MergeDestAbort(Abort):
323 """Raised when an update is aborted for destination issues"""
323 """Raised when an update is aborted for destination issues"""
324
324
325
325
326 class NoMergeDestAbort(MergeDestAbort):
326 class NoMergeDestAbort(MergeDestAbort):
327 """Raised when an update is aborted because there is nothing to merge"""
327 """Raised when an update is aborted because there is nothing to merge"""
328
328
329
329
330 class ManyMergeDestAbort(MergeDestAbort):
330 class ManyMergeDestAbort(MergeDestAbort):
331 """Raised when an update is aborted because destination is ambiguous"""
331 """Raised when an update is aborted because destination is ambiguous"""
332
332
333
333
334 class ResponseExpected(Abort):
334 class ResponseExpected(Abort):
335 """Raised when an EOF is received for a prompt"""
335 """Raised when an EOF is received for a prompt"""
336
336
337 def __init__(self):
337 def __init__(self):
338 from .i18n import _
338 from .i18n import _
339
339
340 Abort.__init__(self, _(b'response expected'))
340 Abort.__init__(self, _(b'response expected'))
341
341
342
342
343 class RemoteError(Abort):
343 class RemoteError(Abort):
344 """Exception raised when interacting with a remote repo fails"""
344 """Exception raised when interacting with a remote repo fails"""
345
345
346 detailed_exit_code = 100
346 detailed_exit_code = 100
347
347
348
348
349 class OutOfBandError(RemoteError):
349 class OutOfBandError(RemoteError):
350 """Exception raised when a remote repo reports failure"""
350 """Exception raised when a remote repo reports failure"""
351
351
352 def __init__(
352 def __init__(
353 self,
353 self,
354 message: Optional[bytes] = None,
354 message: Optional[bytes] = None,
355 hint: Optional[bytes] = None,
355 hint: Optional[bytes] = None,
356 ):
356 ):
357 from .i18n import _
357 from .i18n import _
358
358
359 if message:
359 if message:
360 # Abort.format() adds a trailing newline
360 # Abort.format() adds a trailing newline
361 message = _(b"remote error:\n%s") % message.rstrip(b'\n')
361 message = _(b"remote error:\n%s") % message.rstrip(b'\n')
362 else:
362 else:
363 message = _(b"remote error")
363 message = _(b"remote error")
364 super(OutOfBandError, self).__init__(message, hint=hint)
364 super(OutOfBandError, self).__init__(message, hint=hint)
365
365
366
366
367 class ParseError(Abort):
367 class ParseError(Abort):
368 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
368 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
369
369
370 detailed_exit_code = 10
370 detailed_exit_code = 10
371
371
372 def __init__(
372 def __init__(
373 self,
373 self,
374 message: bytes,
374 message: bytes,
375 location: Optional[Union[bytes, int]] = None,
375 location: Optional[Union[bytes, int]] = None,
376 hint: Optional[bytes] = None,
376 hint: Optional[bytes] = None,
377 ):
377 ):
378 super(ParseError, self).__init__(message, hint=hint)
378 super(ParseError, self).__init__(message, hint=hint)
379 self.location = location
379 self.location = location
380
380
381 def format(self) -> bytes:
381 def format(self) -> bytes:
382 from .i18n import _
382 from .i18n import _
383
383
384 if self.location is not None:
384 if self.location is not None:
385 message = _(b"hg: parse error at %s: %s\n") % (
385 message = _(b"hg: parse error at %s: %s\n") % (
386 pycompat.bytestr(self.location),
386 pycompat.bytestr(self.location),
387 self.message,
387 self.message,
388 )
388 )
389 else:
389 else:
390 message = _(b"hg: parse error: %s\n") % self.message
390 message = _(b"hg: parse error: %s\n") % self.message
391 if self.hint:
391 if self.hint:
392 message += _(b"(%s)\n") % self.hint
392 message += _(b"(%s)\n") % self.hint
393 return message
393 return message
394
394
395
395
396 class PatchError(Exception):
396 class PatchError(Exception):
397 __bytes__ = _tobytes
397 __bytes__ = _tobytes
398
398
399
399
400 class PatchParseError(PatchError):
400 class PatchParseError(PatchError):
401 __bytes__ = _tobytes
401 __bytes__ = _tobytes
402
402
403
403
404 class PatchApplicationError(PatchError):
404 class PatchApplicationError(PatchError):
405 __bytes__ = _tobytes
405 __bytes__ = _tobytes
406
406
407
407
408 def getsimilar(symbols: Iterable[bytes], value: bytes) -> List[bytes]:
408 def getsimilar(symbols: Iterable[bytes], value: bytes) -> List[bytes]:
409 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
409 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
410 # The cutoff for similarity here is pretty arbitrary. It should
410 # The cutoff for similarity here is pretty arbitrary. It should
411 # probably be investigated and tweaked.
411 # probably be investigated and tweaked.
412 return [s for s in symbols if sim(s) > 0.6]
412 return [s for s in symbols if sim(s) > 0.6]
413
413
414
414
415 def similarity_hint(similar: List[bytes]) -> Optional[bytes]:
415 def similarity_hint(similar: List[bytes]) -> Optional[bytes]:
416 from .i18n import _
416 from .i18n import _
417
417
418 if len(similar) == 1:
418 if len(similar) == 1:
419 return _(b"did you mean %s?") % similar[0]
419 return _(b"did you mean %s?") % similar[0]
420 elif similar:
420 elif similar:
421 ss = b", ".join(sorted(similar))
421 ss = b", ".join(sorted(similar))
422 return _(b"did you mean one of %s?") % ss
422 return _(b"did you mean one of %s?") % ss
423 else:
423 else:
424 return None
424 return None
425
425
426
426
427 class UnknownIdentifier(ParseError):
427 class UnknownIdentifier(ParseError):
428 """Exception raised when a {rev,file}set references an unknown identifier"""
428 """Exception raised when a {rev,file}set references an unknown identifier"""
429
429
430 def __init__(self, function: bytes, symbols: Iterable[bytes]) -> None:
430 def __init__(self, function: bytes, symbols: Iterable[bytes]) -> None:
431 from .i18n import _
431 from .i18n import _
432
432
433 similar = getsimilar(symbols, function)
433 similar = getsimilar(symbols, function)
434 hint = similarity_hint(similar)
434 hint = similarity_hint(similar)
435
435
436 ParseError.__init__(
436 ParseError.__init__(
437 self, _(b"unknown identifier: %s") % function, hint=hint
437 self, _(b"unknown identifier: %s") % function, hint=hint
438 )
438 )
439
439
440
440
441 class RepoError(Hint, Exception):
441 class RepoError(Hint, Exception):
442 __bytes__ = _tobytes
442 __bytes__ = _tobytes
443
443
444
444
445 class RepoLookupError(RepoError):
445 class RepoLookupError(RepoError):
446 pass
446 pass
447
447
448
448
449 class FilteredRepoLookupError(RepoLookupError):
449 class FilteredRepoLookupError(RepoLookupError):
450 pass
450 pass
451
451
452
452
453 class CapabilityError(RepoError):
453 class CapabilityError(RepoError):
454 pass
454 pass
455
455
456
456
457 class RequirementError(RepoError):
457 class RequirementError(RepoError):
458 """Exception raised if .hg/requires has an unknown entry."""
458 """Exception raised if .hg/requires has an unknown entry."""
459
459
460
460
461 class StdioError(IOError):
461 class StdioError(IOError):
462 """Raised if I/O to stdout or stderr fails"""
462 """Raised if I/O to stdout or stderr fails"""
463
463
464 def __init__(self, err: IOError) -> None:
464 def __init__(self, err: IOError) -> None:
465 IOError.__init__(self, err.errno, err.strerror)
465 IOError.__init__(self, err.errno, err.strerror)
466
466
467 # no __bytes__() because error message is derived from the standard IOError
467 # no __bytes__() because error message is derived from the standard IOError
468
468
469
469
470 class UnsupportedMergeRecords(Abort):
470 class UnsupportedMergeRecords(Abort):
471 def __init__(self, recordtypes: Iterable[bytes]) -> None:
471 def __init__(self, recordtypes: Iterable[bytes]) -> None:
472 from .i18n import _
472 from .i18n import _
473
473
474 self.recordtypes = sorted(recordtypes)
474 self.recordtypes = sorted(recordtypes)
475 s = b' '.join(self.recordtypes)
475 s = b' '.join(self.recordtypes)
476 Abort.__init__(
476 Abort.__init__(
477 self,
477 self,
478 _(b'unsupported merge state records: %s') % s,
478 _(b'unsupported merge state records: %s') % s,
479 hint=_(
479 hint=_(
480 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
480 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
481 b'more information'
481 b'more information'
482 ),
482 ),
483 )
483 )
484
484
485
485
486 class UnknownVersion(Abort):
486 class UnknownVersion(Abort):
487 """generic exception for aborting from an encounter with an unknown version"""
487 """generic exception for aborting from an encounter with an unknown version"""
488
488
489 def __init__(
489 def __init__(
490 self,
490 self,
491 msg: bytes,
491 msg: bytes,
492 hint: Optional[bytes] = None,
492 hint: Optional[bytes] = None,
493 version: Optional[bytes] = None,
493 version: Optional[bytes] = None,
494 ) -> None:
494 ) -> None:
495 self.version = version
495 self.version = version
496 super(UnknownVersion, self).__init__(msg, hint=hint)
496 super(UnknownVersion, self).__init__(msg, hint=hint)
497
497
498
498
499 class LockError(IOError):
499 class LockError(IOError):
500 def __init__(
500 def __init__(
501 self,
501 self,
502 errno: int,
502 errno: int,
503 strerror: str,
503 strerror: str,
504 filename: bytes,
504 filename: bytes,
505 desc: Optional[bytes],
505 desc: Optional[bytes],
506 ) -> None:
506 ) -> None:
507 IOError.__init__(self, errno, strerror, filename)
507 IOError.__init__(self, errno, strerror, filename)
508 self.desc = desc
508 self.desc = desc
509
509
510 # no __bytes__() because error message is derived from the standard IOError
510 # no __bytes__() because error message is derived from the standard IOError
511
511
512
512
513 class LockHeld(LockError):
513 class LockHeld(LockError):
514 def __init__(
514 def __init__(
515 self,
515 self,
516 errno: int,
516 errno: int,
517 filename: bytes,
517 filename: bytes,
518 desc: Optional[bytes],
518 desc: Optional[bytes],
519 locker,
519 locker,
520 ):
520 ):
521 LockError.__init__(self, errno, 'Lock held', filename, desc)
521 LockError.__init__(self, errno, 'Lock held', filename, desc)
522 self.filename: bytes = filename
522 self.filename: bytes = filename
523 self.locker = locker
523 self.locker = locker
524
524
525
525
526 class LockUnavailable(LockError):
526 class LockUnavailable(LockError):
527 pass
527 pass
528
528
529
529
530 # LockError is for errors while acquiring the lock -- this is unrelated
530 # LockError is for errors while acquiring the lock -- this is unrelated
531 class LockInheritanceContractViolation(RuntimeError):
531 class LockInheritanceContractViolation(RuntimeError):
532 __bytes__ = _tobytes
532 __bytes__ = _tobytes
533
533
534
534
535 class ResponseError(Exception):
535 class ResponseError(Exception):
536 """Raised to print an error with part of output and exit."""
536 """Raised to print an error with part of output and exit."""
537
537
538 __bytes__ = _tobytes
538 __bytes__ = _tobytes
539
539
540
540
541 # derived from KeyboardInterrupt to simplify some breakout code
541 # derived from KeyboardInterrupt to simplify some breakout code
542 class SignalInterrupt(KeyboardInterrupt):
542 class SignalInterrupt(KeyboardInterrupt):
543 """Exception raised on SIGTERM and SIGHUP."""
543 """Exception raised on SIGTERM and SIGHUP."""
544
544
545
545
546 class SignatureError(Exception):
546 class SignatureError(Exception):
547 __bytes__ = _tobytes
547 __bytes__ = _tobytes
548
548
549
549
550 class PushRaced(RuntimeError):
550 class PushRaced(RuntimeError):
551 """An exception raised during unbundling that indicate a push race"""
551 """An exception raised during unbundling that indicate a push race"""
552
552
553 __bytes__ = _tobytes
553 __bytes__ = _tobytes
554
554
555
555
556 class ProgrammingError(Hint, RuntimeError):
556 class ProgrammingError(Hint, RuntimeError):
557 """Raised if a mercurial (core or extension) developer made a mistake"""
557 """Raised if a mercurial (core or extension) developer made a mistake"""
558
558
559 def __init__(self, msg: AnyStr, *args, **kwargs):
559 def __init__(self, msg: AnyStr, *args, **kwargs):
560 # On Python 3, turn the message back into a string since this is
560 # On Python 3, turn the message back into a string since this is
561 # an internal-only error that won't be printed except in a
561 # an internal-only error that won't be printed except in a
562 # stack traces.
562 # stack traces.
563 msg = pycompat.sysstr(msg)
563 msg = pycompat.sysstr(msg)
564 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
564 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
565
565
566 __bytes__ = _tobytes
566 __bytes__ = _tobytes
567
567
568
568
569 class WdirUnsupported(Exception):
569 class WdirUnsupported(Exception):
570 """An exception which is raised when 'wdir()' is not supported"""
570 """An exception which is raised when 'wdir()' is not supported"""
571
571
572 __bytes__ = _tobytes
572 __bytes__ = _tobytes
573
573
574
574
575 # bundle2 related errors
575 # bundle2 related errors
576 class BundleValueError(ValueError):
576 class BundleValueError(ValueError):
577 """error raised when bundle2 cannot be processed"""
577 """error raised when bundle2 cannot be processed"""
578
578
579 __bytes__ = _tobytes
579 __bytes__ = _tobytes
580
580
581
581
582 class BundleUnknownFeatureError(BundleValueError):
582 class BundleUnknownFeatureError(BundleValueError):
583 def __init__(self, parttype=None, params=(), values=()):
583 def __init__(self, parttype=None, params=(), values=()):
584 self.parttype = parttype
584 self.parttype = parttype
585 self.params = params
585 self.params = params
586 self.values = values
586 self.values = values
587 if self.parttype is None:
587 if self.parttype is None:
588 msg = b'Stream Parameter'
588 msg = b'Stream Parameter'
589 else:
589 else:
590 msg = parttype
590 msg = parttype
591 entries = self.params
591 entries = self.params
592 if self.params and self.values:
592 if self.params and self.values:
593 assert len(self.params) == len(self.values)
593 assert len(self.params) == len(self.values)
594 entries = []
594 entries = []
595 for idx, par in enumerate(self.params):
595 for idx, par in enumerate(self.params):
596 val = self.values[idx]
596 val = self.values[idx]
597 if val is None:
597 if val is None:
598 entries.append(val)
598 entries.append(val)
599 else:
599 else:
600 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
600 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
601 if entries:
601 if entries:
602 msg = b'%s - %s' % (msg, b', '.join(entries))
602 msg = b'%s - %s' % (msg, b', '.join(entries))
603 ValueError.__init__(self, msg) # TODO: convert to str?
603 ValueError.__init__(self, msg) # TODO: convert to str?
604
604
605
605
606 class ReadOnlyPartError(RuntimeError):
606 class ReadOnlyPartError(RuntimeError):
607 """error raised when code tries to alter a part being generated"""
607 """error raised when code tries to alter a part being generated"""
608
608
609 __bytes__ = _tobytes
609 __bytes__ = _tobytes
610
610
611
611
612 class PushkeyFailed(Abort):
612 class PushkeyFailed(Abort):
613 """error raised when a pushkey part failed to update a value"""
613 """error raised when a pushkey part failed to update a value"""
614
614
615 def __init__(
615 def __init__(
616 self, partid, namespace=None, key=None, new=None, old=None, ret=None
616 self, partid, namespace=None, key=None, new=None, old=None, ret=None
617 ):
617 ):
618 self.partid = partid
618 self.partid = partid
619 self.namespace = namespace
619 self.namespace = namespace
620 self.key = key
620 self.key = key
621 self.new = new
621 self.new = new
622 self.old = old
622 self.old = old
623 self.ret = ret
623 self.ret = ret
624 # no i18n expected to be processed into a better message
624 # no i18n expected to be processed into a better message
625 Abort.__init__(
625 Abort.__init__(
626 self, b'failed to update value for "%s/%s"' % (namespace, key)
626 self, b'failed to update value for "%s/%s"' % (namespace, key)
627 )
627 )
628
628
629
629
630 class CensoredNodeError(StorageError):
630 class CensoredNodeError(StorageError):
631 """error raised when content verification fails on a censored node
631 """error raised when content verification fails on a censored node
632
632
633 Also contains the tombstone data substituted for the uncensored data.
633 Also contains the tombstone data substituted for the uncensored data.
634 """
634 """
635
635
636 def __init__(self, filename: bytes, node: bytes, tombstone: bytes):
636 def __init__(self, filename: bytes, node: bytes, tombstone: bytes):
637 from .node import short
637 from .node import short
638
638
639 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
639 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
640 self.tombstone = tombstone
640 self.tombstone = tombstone
641
641
642
642
643 class CensoredBaseError(StorageError):
643 class CensoredBaseError(StorageError):
644 """error raised when a delta is rejected because its base is censored
644 """error raised when a delta is rejected because its base is censored
645
645
646 A delta based on a censored revision must be formed as single patch
646 A delta based on a censored revision must be formed as single patch
647 operation which replaces the entire base with new content. This ensures
647 operation which replaces the entire base with new content. This ensures
648 the delta may be applied by clones which have not censored the base.
648 the delta may be applied by clones which have not censored the base.
649 """
649 """
650
650
651
651
652 class InvalidBundleSpecification(Exception):
652 class InvalidBundleSpecification(Exception):
653 """error raised when a bundle specification is invalid.
653 """error raised when a bundle specification is invalid.
654
654
655 This is used for syntax errors as opposed to support errors.
655 This is used for syntax errors as opposed to support errors.
656 """
656 """
657
657
658 __bytes__ = _tobytes
658 __bytes__ = _tobytes
659
659
660
660
661 class UnsupportedBundleSpecification(Exception):
661 class UnsupportedBundleSpecification(Exception):
662 """error raised when a bundle specification is not supported."""
662 """error raised when a bundle specification is not supported."""
663
663
664 __bytes__ = _tobytes
664 __bytes__ = _tobytes
665
665
666
666
667 class CorruptedState(Exception):
667 class CorruptedState(Exception):
668 """error raised when a command is not able to read its state from file"""
668 """error raised when a command is not able to read its state from file"""
669
669
670 __bytes__ = _tobytes
670 __bytes__ = _tobytes
671
671
672
672
673 class CorruptedDirstate(Exception):
673 class CorruptedDirstate(Exception):
674 """error raised the dirstate appears corrupted on-disk. It may be due to
674 """error raised the dirstate appears corrupted on-disk. It may be due to
675 a dirstate version mismatch (i.e. expecting v2 and finding v1 on disk)."""
675 a dirstate version mismatch (i.e. expecting v2 and finding v1 on disk)."""
676
676
677 __bytes__ = _tobytes
677 __bytes__ = _tobytes
678
678
679
679
680 class PeerTransportError(Abort):
680 class PeerTransportError(Abort):
681 """Transport-level I/O error when communicating with a peer repo."""
681 """Transport-level I/O error when communicating with a peer repo."""
682
682
683
683
684 class InMemoryMergeConflictsError(Exception):
684 class InMemoryMergeConflictsError(Exception):
685 """Exception raised when merge conflicts arose during an in-memory merge."""
685 """Exception raised when merge conflicts arose during an in-memory merge."""
686
686
687 __bytes__ = _tobytes
687 __bytes__ = _tobytes
688
688
689
689
690 class WireprotoCommandError(Exception):
690 class WireprotoCommandError(Exception):
691 """Represents an error during execution of a wire protocol command.
691 """Represents an error during execution of a wire protocol command.
692
692
693 Should only be thrown by wire protocol version 2 commands.
693 Should only be thrown by wire protocol version 2 commands.
694
694
695 The error is a formatter string and an optional iterable of arguments.
695 The error is a formatter string and an optional iterable of arguments.
696 """
696 """
697
697
698 def __init__(
698 def __init__(
699 self,
699 self,
700 message: bytes,
700 message: bytes,
701 args: Optional[Sequence[bytes]] = None,
701 args: Optional[Sequence[bytes]] = None,
702 ) -> None:
702 ) -> None:
703 self.message = message
703 self.message = message
704 self.messageargs = args
704 self.messageargs = args
@@ -1,131 +1,131 b''
1 # i18n.py - internationalization support for mercurial
1 # i18n.py - internationalization support for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import gettext as gettextmod
9 import gettext as gettextmod
10 import locale
10 import locale
11 import os
11 import os
12 import sys
12 import sys
13
13
14 from typing import (
14 from typing import (
15 Callable,
15 Callable,
16 List,
16 List,
17 )
17 )
18
18
19 from .utils import resourceutil
19 from .utils import resourceutil
20 from . import (
20 from . import (
21 encoding,
21 encoding,
22 pycompat,
22 pycompat,
23 )
23 )
24
24
25 # keeps pyflakes happy
25 # keeps pyflakes happy
26 assert [
26 assert [
27 Callable,
27 Callable,
28 List,
28 List,
29 ]
29 ]
30
30
31 # modelled after templater.templatepath:
31 # modelled after templater.templatepath:
32 if getattr(sys, 'frozen', None) is not None:
32 if getattr(sys, 'frozen', None) is not None:
33 module = pycompat.sysexecutable
33 module = pycompat.sysexecutable
34 else:
34 else:
35 module = pycompat.fsencode(__file__)
35 module = pycompat.fsencode(__file__)
36
36
37 _languages = None
37 _languages = None
38 if (
38 if (
39 pycompat.iswindows
39 pycompat.iswindows
40 and b'LANGUAGE' not in encoding.environ
40 and b'LANGUAGE' not in encoding.environ
41 and b'LC_ALL' not in encoding.environ
41 and b'LC_ALL' not in encoding.environ
42 and b'LC_MESSAGES' not in encoding.environ
42 and b'LC_MESSAGES' not in encoding.environ
43 and b'LANG' not in encoding.environ
43 and b'LANG' not in encoding.environ
44 ):
44 ):
45 # Try to detect UI language by "User Interface Language Management" API
45 # Try to detect UI language by "User Interface Language Management" API
46 # if no locale variables are set. Note that locale.getdefaultlocale()
46 # if no locale variables are set. Note that locale.getdefaultlocale()
47 # uses GetLocaleInfo(), which may be different from UI language.
47 # uses GetLocaleInfo(), which may be different from UI language.
48 # (See http://msdn.microsoft.com/en-us/library/dd374098(v=VS.85).aspx )
48 # (See http://msdn.microsoft.com/en-us/library/dd374098(v=VS.85).aspx )
49 try:
49 try:
50 import ctypes
50 import ctypes
51
51
52 # pytype: disable=module-attr
52 # pytype: disable=module-attr
53 langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
53 langid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
54 # pytype: enable=module-attr
54 # pytype: enable=module-attr
55
55
56 _languages = [locale.windows_locale[langid]]
56 _languages = [locale.windows_locale[langid]]
57 except (ImportError, AttributeError, KeyError):
57 except (ImportError, AttributeError, KeyError):
58 # ctypes not found or unknown langid
58 # ctypes not found or unknown langid
59 pass
59 pass
60
60
61
61
62 datapath = pycompat.fsdecode(resourceutil.datapath)
62 datapath = pycompat.fsdecode(resourceutil.datapath)
63 localedir = os.path.join(datapath, 'locale')
63 localedir = os.path.join(datapath, 'locale')
64 t = gettextmod.translation('hg', localedir, _languages, fallback=True)
64 t = gettextmod.translation('hg', localedir, _languages, fallback=True)
65 try:
65 try:
66 _ugettext = t.ugettext # pytype: disable=attribute-error
66 _ugettext = t.ugettext # pytype: disable=attribute-error
67 except AttributeError:
67 except AttributeError:
68 _ugettext = t.gettext
68 _ugettext = t.gettext
69
69
70
70
71 _msgcache = {} # encoding: {message: translation}
71 _msgcache = {} # encoding: {message: translation}
72
72
73
73
74 def gettext(message: bytes) -> bytes:
74 def gettext(message: bytes) -> bytes:
75 """Translate message.
75 """Translate message.
76
76
77 The message is looked up in the catalog to get a Unicode string,
77 The message is looked up in the catalog to get a Unicode string,
78 which is encoded in the local encoding before being returned.
78 which is encoded in the local encoding before being returned.
79
79
80 Important: message is restricted to characters in the encoding
80 Important: message is restricted to characters in the encoding
81 given by sys.getdefaultencoding() which is most likely 'ascii'.
81 given by sys.getdefaultencoding() which is most likely 'ascii'.
82 """
82 """
83 # If message is None, t.ugettext will return u'None' as the
83 # If message is None, t.ugettext will return u'None' as the
84 # translation whereas our callers expect us to return None.
84 # translation whereas our callers expect us to return None.
85 if message is None or not _ugettext:
85 if message is None or not _ugettext:
86 return message
86 return message
87
87
88 cache = _msgcache.setdefault(encoding.encoding, {})
88 cache = _msgcache.setdefault(encoding.encoding, {})
89 if message not in cache:
89 if message not in cache:
90 if type(message) is str:
90 if type(message) is str:
91 # goofy unicode docstrings in test
91 # goofy unicode docstrings in test
92 paragraphs = message.split(u'\n\n') # type: List[str]
92 paragraphs: List[str] = message.split(u'\n\n')
93 else:
93 else:
94 # should be ascii, but we have unicode docstrings in test, which
94 # should be ascii, but we have unicode docstrings in test, which
95 # are converted to utf-8 bytes on Python 3.
95 # are converted to utf-8 bytes on Python 3.
96 paragraphs = [p.decode("utf-8") for p in message.split(b'\n\n')]
96 paragraphs = [p.decode("utf-8") for p in message.split(b'\n\n')]
97 # Be careful not to translate the empty string -- it holds the
97 # Be careful not to translate the empty string -- it holds the
98 # meta data of the .po file.
98 # meta data of the .po file.
99 u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
99 u = u'\n\n'.join([p and _ugettext(p) or u'' for p in paragraphs])
100 try:
100 try:
101 # encoding.tolocal cannot be used since it will first try to
101 # encoding.tolocal cannot be used since it will first try to
102 # decode the Unicode string. Calling u.decode(enc) really
102 # decode the Unicode string. Calling u.decode(enc) really
103 # means u.encode(sys.getdefaultencoding()).decode(enc). Since
103 # means u.encode(sys.getdefaultencoding()).decode(enc). Since
104 # the Python encoding defaults to 'ascii', this fails if the
104 # the Python encoding defaults to 'ascii', this fails if the
105 # translated string use non-ASCII characters.
105 # translated string use non-ASCII characters.
106 encodingstr = pycompat.sysstr(encoding.encoding)
106 encodingstr = pycompat.sysstr(encoding.encoding)
107 cache[message] = u.encode(encodingstr, "replace")
107 cache[message] = u.encode(encodingstr, "replace")
108 except LookupError:
108 except LookupError:
109 # An unknown encoding results in a LookupError.
109 # An unknown encoding results in a LookupError.
110 cache[message] = message
110 cache[message] = message
111 return cache[message]
111 return cache[message]
112
112
113
113
114 def _plain():
114 def _plain():
115 if (
115 if (
116 b'HGPLAIN' not in encoding.environ
116 b'HGPLAIN' not in encoding.environ
117 and b'HGPLAINEXCEPT' not in encoding.environ
117 and b'HGPLAINEXCEPT' not in encoding.environ
118 ):
118 ):
119 return False
119 return False
120 exceptions = encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
120 exceptions = encoding.environ.get(b'HGPLAINEXCEPT', b'').strip().split(b',')
121 return b'i18n' not in exceptions
121 return b'i18n' not in exceptions
122
122
123
123
124 if _plain():
124 if _plain():
125
125
126 def _(message: bytes) -> bytes:
126 def _(message: bytes) -> bytes:
127 return message
127 return message
128
128
129
129
130 else:
130 else:
131 _ = gettext
131 _ = gettext
@@ -1,395 +1,395 b''
1 import contextlib
1 import contextlib
2 import errno
2 import errno
3 import os
3 import os
4 import posixpath
4 import posixpath
5 import stat
5 import stat
6
6
7 from typing import (
7 from typing import (
8 Any,
8 Any,
9 Callable,
9 Callable,
10 Iterator,
10 Iterator,
11 Optional,
11 Optional,
12 )
12 )
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 encoding,
16 encoding,
17 error,
17 error,
18 policy,
18 policy,
19 pycompat,
19 pycompat,
20 util,
20 util,
21 )
21 )
22
22
23 rustdirs = policy.importrust('dirstate', 'Dirs')
23 rustdirs = policy.importrust('dirstate', 'Dirs')
24 parsers = policy.importmod('parsers')
24 parsers = policy.importmod('parsers')
25
25
26 # keeps pyflakes happy
26 # keeps pyflakes happy
27 assert [
27 assert [
28 Any,
28 Any,
29 Callable,
29 Callable,
30 Iterator,
30 Iterator,
31 Optional,
31 Optional,
32 ]
32 ]
33
33
34
34
35 def _lowerclean(s: bytes) -> bytes:
35 def _lowerclean(s: bytes) -> bytes:
36 return encoding.hfsignoreclean(s.lower())
36 return encoding.hfsignoreclean(s.lower())
37
37
38
38
39 class pathauditor:
39 class pathauditor:
40 """ensure that a filesystem path contains no banned components.
40 """ensure that a filesystem path contains no banned components.
41 the following properties of a path are checked:
41 the following properties of a path are checked:
42
42
43 - ends with a directory separator
43 - ends with a directory separator
44 - under top-level .hg
44 - under top-level .hg
45 - starts at the root of a windows drive
45 - starts at the root of a windows drive
46 - contains ".."
46 - contains ".."
47
47
48 More check are also done about the file system states:
48 More check are also done about the file system states:
49 - traverses a symlink (e.g. a/symlink_here/b)
49 - traverses a symlink (e.g. a/symlink_here/b)
50 - inside a nested repository (a callback can be used to approve
50 - inside a nested repository (a callback can be used to approve
51 some nested repositories, e.g., subrepositories)
51 some nested repositories, e.g., subrepositories)
52
52
53 The file system checks are only done when 'realfs' is set to True (the
53 The file system checks are only done when 'realfs' is set to True (the
54 default). They should be disable then we are auditing path for operation on
54 default). They should be disable then we are auditing path for operation on
55 stored history.
55 stored history.
56
56
57 If 'cached' is set to True, audited paths and sub-directories are cached.
57 If 'cached' is set to True, audited paths and sub-directories are cached.
58 Be careful to not keep the cache of unmanaged directories for long because
58 Be careful to not keep the cache of unmanaged directories for long because
59 audited paths may be replaced with symlinks.
59 audited paths may be replaced with symlinks.
60 """
60 """
61
61
62 def __init__(self, root, callback=None, realfs=True, cached=False):
62 def __init__(self, root, callback=None, realfs=True, cached=False):
63 self.audited = set()
63 self.audited = set()
64 self.auditeddir = dict()
64 self.auditeddir = dict()
65 self.root = root
65 self.root = root
66 self._realfs = realfs
66 self._realfs = realfs
67 self._cached = cached
67 self._cached = cached
68 self.callback = callback
68 self.callback = callback
69 if os.path.lexists(root) and not util.fscasesensitive(root):
69 if os.path.lexists(root) and not util.fscasesensitive(root):
70 self.normcase = util.normcase
70 self.normcase = util.normcase
71 else:
71 else:
72 self.normcase = lambda x: x
72 self.normcase = lambda x: x
73
73
74 def __call__(self, path: bytes, mode: Optional[Any] = None) -> None:
74 def __call__(self, path: bytes, mode: Optional[Any] = None) -> None:
75 """Check the relative path.
75 """Check the relative path.
76 path may contain a pattern (e.g. foodir/**.txt)"""
76 path may contain a pattern (e.g. foodir/**.txt)"""
77
77
78 path = util.localpath(path)
78 path = util.localpath(path)
79 if path in self.audited:
79 if path in self.audited:
80 return
80 return
81 # AIX ignores "/" at end of path, others raise EISDIR.
81 # AIX ignores "/" at end of path, others raise EISDIR.
82 if util.endswithsep(path):
82 if util.endswithsep(path):
83 raise error.InputError(
83 raise error.InputError(
84 _(b"path ends in directory separator: %s") % path
84 _(b"path ends in directory separator: %s") % path
85 )
85 )
86 parts = util.splitpath(path)
86 parts = util.splitpath(path)
87 if (
87 if (
88 os.path.splitdrive(path)[0]
88 os.path.splitdrive(path)[0]
89 or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
89 or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
90 or pycompat.ospardir in parts
90 or pycompat.ospardir in parts
91 ):
91 ):
92 raise error.InputError(
92 raise error.InputError(
93 _(b"path contains illegal component: %s") % path
93 _(b"path contains illegal component: %s") % path
94 )
94 )
95 # Windows shortname aliases
95 # Windows shortname aliases
96 if b"~" in path:
96 if b"~" in path:
97 for p in parts:
97 for p in parts:
98 if b"~" in p:
98 if b"~" in p:
99 first, last = p.split(b"~", 1)
99 first, last = p.split(b"~", 1)
100 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
100 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
101 raise error.InputError(
101 raise error.InputError(
102 _(b"path contains illegal component: %s") % path
102 _(b"path contains illegal component: %s") % path
103 )
103 )
104 if b'.hg' in _lowerclean(path):
104 if b'.hg' in _lowerclean(path):
105 lparts = [_lowerclean(p) for p in parts]
105 lparts = [_lowerclean(p) for p in parts]
106 for p in b'.hg', b'.hg.':
106 for p in b'.hg', b'.hg.':
107 if p in lparts[1:]:
107 if p in lparts[1:]:
108 pos = lparts.index(p)
108 pos = lparts.index(p)
109 base = os.path.join(*parts[:pos])
109 base = os.path.join(*parts[:pos])
110 raise error.InputError(
110 raise error.InputError(
111 _(b"path '%s' is inside nested repo %r")
111 _(b"path '%s' is inside nested repo %r")
112 % (path, pycompat.bytestr(base))
112 % (path, pycompat.bytestr(base))
113 )
113 )
114
114
115 if self._realfs:
115 if self._realfs:
116 # It's important that we check the path parts starting from the root.
116 # It's important that we check the path parts starting from the root.
117 # We don't want to add "foo/bar/baz" to auditeddir before checking if
117 # We don't want to add "foo/bar/baz" to auditeddir before checking if
118 # there's a "foo/.hg" directory. This also means we won't accidentally
118 # there's a "foo/.hg" directory. This also means we won't accidentally
119 # traverse a symlink into some other filesystem (which is potentially
119 # traverse a symlink into some other filesystem (which is potentially
120 # expensive to access).
120 # expensive to access).
121 for prefix in finddirs_rev_noroot(path):
121 for prefix in finddirs_rev_noroot(path):
122 if prefix in self.auditeddir:
122 if prefix in self.auditeddir:
123 res = self.auditeddir[prefix]
123 res = self.auditeddir[prefix]
124 else:
124 else:
125 res = pathauditor._checkfs_exists(
125 res = pathauditor._checkfs_exists(
126 self.root, prefix, path, self.callback
126 self.root, prefix, path, self.callback
127 )
127 )
128 if self._cached:
128 if self._cached:
129 self.auditeddir[prefix] = res
129 self.auditeddir[prefix] = res
130 if not res:
130 if not res:
131 break
131 break
132
132
133 if self._cached:
133 if self._cached:
134 self.audited.add(path)
134 self.audited.add(path)
135
135
136 @staticmethod
136 @staticmethod
137 def _checkfs_exists(
137 def _checkfs_exists(
138 root,
138 root,
139 prefix: bytes,
139 prefix: bytes,
140 path: bytes,
140 path: bytes,
141 callback: Optional[Callable[[bytes], bool]] = None,
141 callback: Optional[Callable[[bytes], bool]] = None,
142 ):
142 ):
143 """raise exception if a file system backed check fails.
143 """raise exception if a file system backed check fails.
144
144
145 Return a bool that indicates that the directory (or file) exists."""
145 Return a bool that indicates that the directory (or file) exists."""
146 curpath = os.path.join(root, prefix)
146 curpath = os.path.join(root, prefix)
147 try:
147 try:
148 st = os.lstat(curpath)
148 st = os.lstat(curpath)
149 except OSError as err:
149 except OSError as err:
150 if err.errno == errno.ENOENT:
150 if err.errno == errno.ENOENT:
151 return False
151 return False
152 # EINVAL can be raised as invalid path syntax under win32.
152 # EINVAL can be raised as invalid path syntax under win32.
153 # They must be ignored for patterns can be checked too.
153 # They must be ignored for patterns can be checked too.
154 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
154 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
155 raise
155 raise
156 else:
156 else:
157 if stat.S_ISLNK(st.st_mode):
157 if stat.S_ISLNK(st.st_mode):
158 msg = _(b'path %r traverses symbolic link %r') % (
158 msg = _(b'path %r traverses symbolic link %r') % (
159 pycompat.bytestr(path),
159 pycompat.bytestr(path),
160 pycompat.bytestr(prefix),
160 pycompat.bytestr(prefix),
161 )
161 )
162 raise error.Abort(msg)
162 raise error.Abort(msg)
163 elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
163 elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
164 os.path.join(curpath, b'.hg')
164 os.path.join(curpath, b'.hg')
165 ):
165 ):
166 if not callback or not callback(curpath):
166 if not callback or not callback(curpath):
167 msg = _(b"path '%s' is inside nested repo %r")
167 msg = _(b"path '%s' is inside nested repo %r")
168 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
168 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
169 return True
169 return True
170
170
171 def check(self, path: bytes) -> bool:
171 def check(self, path: bytes) -> bool:
172 try:
172 try:
173 self(path)
173 self(path)
174 return True
174 return True
175 except (OSError, error.Abort):
175 except (OSError, error.Abort):
176 return False
176 return False
177
177
178 @contextlib.contextmanager
178 @contextlib.contextmanager
179 def cached(self):
179 def cached(self):
180 if self._cached:
180 if self._cached:
181 yield
181 yield
182 else:
182 else:
183 try:
183 try:
184 self._cached = True
184 self._cached = True
185 yield
185 yield
186 finally:
186 finally:
187 self.audited.clear()
187 self.audited.clear()
188 self.auditeddir.clear()
188 self.auditeddir.clear()
189 self._cached = False
189 self._cached = False
190
190
191
191
192 def canonpath(
192 def canonpath(
193 root: bytes,
193 root: bytes,
194 cwd: bytes,
194 cwd: bytes,
195 myname: bytes,
195 myname: bytes,
196 auditor: Optional[pathauditor] = None,
196 auditor: Optional[pathauditor] = None,
197 ) -> bytes:
197 ) -> bytes:
198 """return the canonical path of myname, given cwd and root
198 """return the canonical path of myname, given cwd and root
199
199
200 >>> def check(root, cwd, myname):
200 >>> def check(root, cwd, myname):
201 ... a = pathauditor(root, realfs=False)
201 ... a = pathauditor(root, realfs=False)
202 ... try:
202 ... try:
203 ... return canonpath(root, cwd, myname, a)
203 ... return canonpath(root, cwd, myname, a)
204 ... except error.Abort:
204 ... except error.Abort:
205 ... return 'aborted'
205 ... return 'aborted'
206 >>> def unixonly(root, cwd, myname, expected='aborted'):
206 >>> def unixonly(root, cwd, myname, expected='aborted'):
207 ... if pycompat.iswindows:
207 ... if pycompat.iswindows:
208 ... return expected
208 ... return expected
209 ... return check(root, cwd, myname)
209 ... return check(root, cwd, myname)
210 >>> def winonly(root, cwd, myname, expected='aborted'):
210 >>> def winonly(root, cwd, myname, expected='aborted'):
211 ... if not pycompat.iswindows:
211 ... if not pycompat.iswindows:
212 ... return expected
212 ... return expected
213 ... return check(root, cwd, myname)
213 ... return check(root, cwd, myname)
214 >>> winonly(b'd:\\\\repo', b'c:\\\\dir', b'filename')
214 >>> winonly(b'd:\\\\repo', b'c:\\\\dir', b'filename')
215 'aborted'
215 'aborted'
216 >>> winonly(b'c:\\\\repo', b'c:\\\\dir', b'filename')
216 >>> winonly(b'c:\\\\repo', b'c:\\\\dir', b'filename')
217 'aborted'
217 'aborted'
218 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'filename')
218 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'filename')
219 'aborted'
219 'aborted'
220 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'repo\\\\filename',
220 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'repo\\\\filename',
221 ... b'filename')
221 ... b'filename')
222 'filename'
222 'filename'
223 >>> winonly(b'c:\\\\repo', b'c:\\\\repo', b'filename', b'filename')
223 >>> winonly(b'c:\\\\repo', b'c:\\\\repo', b'filename', b'filename')
224 'filename'
224 'filename'
225 >>> winonly(b'c:\\\\repo', b'c:\\\\repo\\\\subdir', b'filename',
225 >>> winonly(b'c:\\\\repo', b'c:\\\\repo\\\\subdir', b'filename',
226 ... b'subdir/filename')
226 ... b'subdir/filename')
227 'subdir/filename'
227 'subdir/filename'
228 >>> unixonly(b'/repo', b'/dir', b'filename')
228 >>> unixonly(b'/repo', b'/dir', b'filename')
229 'aborted'
229 'aborted'
230 >>> unixonly(b'/repo', b'/', b'filename')
230 >>> unixonly(b'/repo', b'/', b'filename')
231 'aborted'
231 'aborted'
232 >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename')
232 >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename')
233 'filename'
233 'filename'
234 >>> unixonly(b'/repo', b'/repo', b'filename', b'filename')
234 >>> unixonly(b'/repo', b'/repo', b'filename', b'filename')
235 'filename'
235 'filename'
236 >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename')
236 >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename')
237 'subdir/filename'
237 'subdir/filename'
238 """
238 """
239 if util.endswithsep(root):
239 if util.endswithsep(root):
240 rootsep = root
240 rootsep = root
241 else:
241 else:
242 rootsep = root + pycompat.ossep
242 rootsep = root + pycompat.ossep
243 name = myname
243 name = myname
244 if not os.path.isabs(name):
244 if not os.path.isabs(name):
245 name = os.path.join(root, cwd, name)
245 name = os.path.join(root, cwd, name)
246 name = os.path.normpath(name)
246 name = os.path.normpath(name)
247 if auditor is None:
247 if auditor is None:
248 auditor = pathauditor(root)
248 auditor = pathauditor(root)
249 if name != rootsep and name.startswith(rootsep):
249 if name != rootsep and name.startswith(rootsep):
250 name = name[len(rootsep) :]
250 name = name[len(rootsep) :]
251 auditor(name)
251 auditor(name)
252 return util.pconvert(name)
252 return util.pconvert(name)
253 elif name == root:
253 elif name == root:
254 return b''
254 return b''
255 else:
255 else:
256 # Determine whether `name' is in the hierarchy at or beneath `root',
256 # Determine whether `name' is in the hierarchy at or beneath `root',
257 # by iterating name=dirname(name) until that causes no change (can't
257 # by iterating name=dirname(name) until that causes no change (can't
258 # check name == '/', because that doesn't work on windows). The list
258 # check name == '/', because that doesn't work on windows). The list
259 # `rel' holds the reversed list of components making up the relative
259 # `rel' holds the reversed list of components making up the relative
260 # file name we want.
260 # file name we want.
261 rel = []
261 rel = []
262 while True:
262 while True:
263 try:
263 try:
264 s = util.samefile(name, root)
264 s = util.samefile(name, root)
265 except OSError:
265 except OSError:
266 s = False
266 s = False
267 if s:
267 if s:
268 if not rel:
268 if not rel:
269 # name was actually the same as root (maybe a symlink)
269 # name was actually the same as root (maybe a symlink)
270 return b''
270 return b''
271 rel.reverse()
271 rel.reverse()
272 name = os.path.join(*rel)
272 name = os.path.join(*rel)
273 auditor(name)
273 auditor(name)
274 return util.pconvert(name)
274 return util.pconvert(name)
275 dirname, basename = util.split(name)
275 dirname, basename = util.split(name)
276 rel.append(basename)
276 rel.append(basename)
277 if dirname == name:
277 if dirname == name:
278 break
278 break
279 name = dirname
279 name = dirname
280
280
281 # A common mistake is to use -R, but specify a file relative to the repo
281 # A common mistake is to use -R, but specify a file relative to the repo
282 # instead of cwd. Detect that case, and provide a hint to the user.
282 # instead of cwd. Detect that case, and provide a hint to the user.
283 hint = None
283 hint = None
284 try:
284 try:
285 if cwd != root:
285 if cwd != root:
286 canonpath(root, root, myname, auditor)
286 canonpath(root, root, myname, auditor)
287 relpath = util.pathto(root, cwd, b'')
287 relpath = util.pathto(root, cwd, b'')
288 if relpath.endswith(pycompat.ossep):
288 if relpath.endswith(pycompat.ossep):
289 relpath = relpath[:-1]
289 relpath = relpath[:-1]
290 hint = _(b"consider using '--cwd %s'") % relpath
290 hint = _(b"consider using '--cwd %s'") % relpath
291 except error.Abort:
291 except error.Abort:
292 pass
292 pass
293
293
294 raise error.Abort(
294 raise error.Abort(
295 _(b"%s not under root '%s'") % (myname, root), hint=hint
295 _(b"%s not under root '%s'") % (myname, root), hint=hint
296 )
296 )
297
297
298
298
299 def normasprefix(path: bytes) -> bytes:
299 def normasprefix(path: bytes) -> bytes:
300 """normalize the specified path as path prefix
300 """normalize the specified path as path prefix
301
301
302 Returned value can be used safely for "p.startswith(prefix)",
302 Returned value can be used safely for "p.startswith(prefix)",
303 "p[len(prefix):]", and so on.
303 "p[len(prefix):]", and so on.
304
304
305 For efficiency, this expects "path" argument to be already
305 For efficiency, this expects "path" argument to be already
306 normalized by "os.path.normpath", "os.path.realpath", and so on.
306 normalized by "os.path.normpath", "os.path.realpath", and so on.
307
307
308 See also issue3033 for detail about need of this function.
308 See also issue3033 for detail about need of this function.
309
309
310 >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/')
310 >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/')
311 '/foo/bar/'
311 '/foo/bar/'
312 >>> normasprefix(b'/').replace(pycompat.ossep, b'/')
312 >>> normasprefix(b'/').replace(pycompat.ossep, b'/')
313 '/'
313 '/'
314 """
314 """
315 d, p = os.path.splitdrive(path)
315 d, p = os.path.splitdrive(path)
316 if len(p) != len(pycompat.ossep):
316 if len(p) != len(pycompat.ossep):
317 return path + pycompat.ossep
317 return path + pycompat.ossep
318 else:
318 else:
319 return path
319 return path
320
320
321
321
322 def finddirs(path: bytes) -> Iterator[bytes]:
322 def finddirs(path: bytes) -> Iterator[bytes]:
323 pos = path.rfind(b'/')
323 pos = path.rfind(b'/')
324 while pos != -1:
324 while pos != -1:
325 yield path[:pos]
325 yield path[:pos]
326 pos = path.rfind(b'/', 0, pos)
326 pos = path.rfind(b'/', 0, pos)
327 yield b''
327 yield b''
328
328
329
329
330 def finddirs_rev_noroot(path: bytes) -> Iterator[bytes]:
330 def finddirs_rev_noroot(path: bytes) -> Iterator[bytes]:
331 pos = path.find(pycompat.ossep)
331 pos = path.find(pycompat.ossep)
332 while pos != -1:
332 while pos != -1:
333 yield path[:pos]
333 yield path[:pos]
334 pos = path.find(pycompat.ossep, pos + 1)
334 pos = path.find(pycompat.ossep, pos + 1)
335
335
336
336
337 class dirs:
337 class dirs:
338 '''a multiset of directory names from a set of file paths'''
338 '''a multiset of directory names from a set of file paths'''
339
339
340 def __init__(self, map, only_tracked=False):
340 def __init__(self, map, only_tracked=False):
341 """
341 """
342 a dict map indicates a dirstate while a list indicates a manifest
342 a dict map indicates a dirstate while a list indicates a manifest
343 """
343 """
344 self._dirs = {}
344 self._dirs = {}
345 addpath = self.addpath
345 addpath = self.addpath
346 if isinstance(map, dict) and only_tracked:
346 if isinstance(map, dict) and only_tracked:
347 for f, s in map.items():
347 for f, s in map.items():
348 if s.state != b'r':
348 if s.state != b'r':
349 addpath(f)
349 addpath(f)
350 elif only_tracked:
350 elif only_tracked:
351 msg = b"`only_tracked` is only supported with a dict source"
351 msg = b"`only_tracked` is only supported with a dict source"
352 raise error.ProgrammingError(msg)
352 raise error.ProgrammingError(msg)
353 else:
353 else:
354 for f in map:
354 for f in map:
355 addpath(f)
355 addpath(f)
356
356
357 def addpath(self, path: bytes) -> None:
357 def addpath(self, path: bytes) -> None:
358 dirs = self._dirs
358 dirs = self._dirs
359 for base in finddirs(path):
359 for base in finddirs(path):
360 if base.endswith(b'/'):
360 if base.endswith(b'/'):
361 raise ValueError(
361 raise ValueError(
362 "found invalid consecutive slashes in path: %r" % base
362 "found invalid consecutive slashes in path: %r" % base
363 )
363 )
364 if base in dirs:
364 if base in dirs:
365 dirs[base] += 1
365 dirs[base] += 1
366 return
366 return
367 dirs[base] = 1
367 dirs[base] = 1
368
368
369 def delpath(self, path: bytes) -> None:
369 def delpath(self, path: bytes) -> None:
370 dirs = self._dirs
370 dirs = self._dirs
371 for base in finddirs(path):
371 for base in finddirs(path):
372 if dirs[base] > 1:
372 if dirs[base] > 1:
373 dirs[base] -= 1
373 dirs[base] -= 1
374 return
374 return
375 del dirs[base]
375 del dirs[base]
376
376
377 def __iter__(self):
377 def __iter__(self):
378 return iter(self._dirs)
378 return iter(self._dirs)
379
379
380 def __contains__(self, d: bytes) -> bool:
380 def __contains__(self, d: bytes) -> bool:
381 return d in self._dirs
381 return d in self._dirs
382
382
383
383
384 if hasattr(parsers, 'dirs'):
384 if hasattr(parsers, 'dirs'):
385 dirs = parsers.dirs
385 dirs = parsers.dirs
386
386
387 if rustdirs is not None:
387 if rustdirs is not None:
388 dirs = rustdirs
388 dirs = rustdirs
389
389
390
390
391 # forward two methods from posixpath that do what we need, but we'd
391 # forward two methods from posixpath that do what we need, but we'd
392 # rather not let our internals know that we're thinking in posix terms
392 # rather not let our internals know that we're thinking in posix terms
393 # - instead we'll let them be oblivious.
393 # - instead we'll let them be oblivious.
394 join = posixpath.join
394 join = posixpath.join
395 dirname = posixpath.dirname # type: Callable[[bytes], bytes]
395 dirname: Callable[[bytes], bytes] = posixpath.dirname
@@ -1,1004 +1,1006 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103
103
104 import struct
104 import struct
105 import typing
105 import typing
106
106
107 from typing import (
107 from typing import (
108 Any,
108 Any,
109 Callable,
109 Callable,
110 Dict,
110 Dict,
111 Iterable,
111 Iterable,
112 List,
112 List,
113 Optional,
113 Optional,
114 Set,
114 Set,
115 Tuple,
115 Tuple,
116 )
116 )
117
117
118 from .i18n import _
118 from .i18n import _
119 from .node import (
119 from .node import (
120 bin,
120 bin,
121 hex,
121 hex,
122 nullrev,
122 nullrev,
123 short,
123 short,
124 wdirrev,
124 wdirrev,
125 )
125 )
126 from . import (
126 from . import (
127 error,
127 error,
128 pycompat,
128 pycompat,
129 requirements,
129 requirements,
130 smartset,
130 smartset,
131 txnutil,
131 txnutil,
132 util,
132 util,
133 )
133 )
134
134
135 # keeps pyflakes happy
135 # keeps pyflakes happy
136 assert [
136 assert [
137 Any,
137 Any,
138 Callable,
138 Callable,
139 Dict,
139 Dict,
140 Iterable,
140 Iterable,
141 List,
141 List,
142 Optional,
142 Optional,
143 Set,
143 Set,
144 Tuple,
144 Tuple,
145 ]
145 ]
146
146
147 Phaseroots = Dict[int, Set[bytes]]
147 Phaseroots = Dict[int, Set[bytes]]
148
148
149 if typing.TYPE_CHECKING:
149 if typing.TYPE_CHECKING:
150 from . import (
150 from . import (
151 localrepo,
151 localrepo,
152 ui as uimod,
152 ui as uimod,
153 )
153 )
154
154
155 # keeps pyflakes happy
155 # keeps pyflakes happy
156 assert [uimod]
156 assert [uimod]
157
157
158 Phasedefaults = List[
158 Phasedefaults = List[
159 Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
159 Callable[[localrepo.localrepository, Phaseroots], Phaseroots]
160 ]
160 ]
161
161
162
162
163 _fphasesentry = struct.Struct(b'>i20s')
163 _fphasesentry = struct.Struct(b'>i20s')
164
164
165 # record phase index
165 # record phase index
166 public, draft, secret = range(3) # type: int
166 public: int = 0
167 draft: int = 1
168 secret: int = 2
# Extra phase numbers beyond public/draft/secret (0/1/2, defined above).
# ``archived`` and ``internal`` use non-contiguous bit-like values for
# compatibility with older clients.
archived = 32  # non-continuous for compatibility
internal = 96  # non-continuous for compatibility
allphases = (public, draft, secret, archived, internal)
trackedphases = (draft, secret, archived, internal)
not_public_phases = trackedphases
# record phase names
cmdphasenames = [b'public', b'draft', b'secret']  # known to `hg phase` command
phasenames = dict(enumerate(cmdphasenames))
phasenames[archived] = b'archived'
phasenames[internal] = b'internal'
# map phase name to phase number
phasenumber = {name: phase for phase, name in phasenames.items()}
# like phasenumber, but also include maps for the numeric and binary
# phase number to the phase number
phasenumber2 = phasenumber.copy()
phasenumber2.update({phase: phase for phase in phasenames})
phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
# record phase property
mutablephases = (draft, secret, archived, internal)
relevant_mutable_phases = (draft, secret)  # could be obsolete or unstable
remotehiddenphases = (secret, archived, internal)
localhiddenphases = (internal, archived)

# NOTE: the bit test also matches ``archived`` (32 & 96 != 0), so this
# tuple contains both archived and internal phases.
all_internal_phases = tuple(p for p in allphases if p & internal)
# We do not want any internal content to exit the repository, ever.
no_bundle_phases = all_internal_phases
193
195
194
196
def supportinternal(repo: "localrepo.localrepository") -> bool:
    """True if the internal phase can be used on a repository"""
    return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
198
200
199
201
def supportarchived(repo: "localrepo.localrepository") -> bool:
    """True if the archived phase can be used on a repository"""
    return requirements.ARCHIVED_PHASE_REQUIREMENT in repo.requirements
203
205
204
206
def _readroots(
    repo: "localrepo.localrepository",
    phasedefaults: Optional["Phasedefaults"] = None,
) -> Tuple[Phaseroots, bool]:
    """Read phase roots from disk

    phasedefaults is a list of fn(repo, roots) callable, which are
    executed if the phase roots file does not exist. When phases are
    being initialized on an existing repository, this could be used to
    set selected changesets phase to something else than public.

    Return (roots, dirty) where dirty is true if roots differ from
    what is being stored.
    """
    repo = repo.unfiltered()
    dirty = False
    roots = {i: set() for i in allphases}
    try:
        # honor a pending (transaction) version of the file if present
        f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
        try:
            for line in f:
                # each line is "<phase-number> <hex-node>"
                phase, nh = line.split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except FileNotFoundError:
        # no phaseroots file yet: apply the provided defaults and mark
        # the result as needing a write
        if phasedefaults:
            for f in phasedefaults:
                roots = f(repo, roots)
        dirty = True
    return roots, dirty
236
238
237
239
def binaryencode(phasemapping: Dict[int, List[bytes]]) -> bytes:
    """encode a 'phase -> nodes' mapping into a binary stream

    The revision lists are encoded as (phase, root) pairs.
    """
    binarydata = []
    for phase, nodes in phasemapping.items():
        for head in nodes:
            binarydata.append(_fphasesentry.pack(phase, head))
    return b''.join(binarydata)
248
250
249
251
def binarydecode(stream) -> Dict[int, List[bytes]]:
    """decode a binary stream into a 'phase -> nodes' mapping

    The (phase, root) pairs are turned back into a dictionary with
    the phase as index and the aggregated roots of that phase as value."""
    headsbyphase = {i: [] for i in allphases}
    entrysize = _fphasesentry.size
    while True:
        entry = stream.read(entrysize)
        if len(entry) < entrysize:
            # a short read that is not empty means a truncated stream
            if entry:
                raise error.Abort(_(b'bad phase-heads stream'))
            break
        phase, node = _fphasesentry.unpack(entry)
        headsbyphase[phase].append(node)
    return headsbyphase
266
268
267
269
268 def _sortedrange_insert(data, idx, rev, t):
270 def _sortedrange_insert(data, idx, rev, t):
269 merge_before = False
271 merge_before = False
270 if idx:
272 if idx:
271 r1, t1 = data[idx - 1]
273 r1, t1 = data[idx - 1]
272 merge_before = r1[-1] + 1 == rev and t1 == t
274 merge_before = r1[-1] + 1 == rev and t1 == t
273 merge_after = False
275 merge_after = False
274 if idx < len(data):
276 if idx < len(data):
275 r2, t2 = data[idx]
277 r2, t2 = data[idx]
276 merge_after = r2[0] == rev + 1 and t2 == t
278 merge_after = r2[0] == rev + 1 and t2 == t
277
279
278 if merge_before and merge_after:
280 if merge_before and merge_after:
279 data[idx - 1] = (range(r1[0], r2[-1] + 1), t)
281 data[idx - 1] = (range(r1[0], r2[-1] + 1), t)
280 data.pop(idx)
282 data.pop(idx)
281 elif merge_before:
283 elif merge_before:
282 data[idx - 1] = (range(r1[0], rev + 1), t)
284 data[idx - 1] = (range(r1[0], rev + 1), t)
283 elif merge_after:
285 elif merge_after:
284 data[idx] = (range(rev, r2[-1] + 1), t)
286 data[idx] = (range(rev, r2[-1] + 1), t)
285 else:
287 else:
286 data.insert(idx, (range(rev, rev + 1), t))
288 data.insert(idx, (range(rev, rev + 1), t))
287
289
288
290
def _sortedrange_split(data, idx, rev, t):
    """record a second phase-move ``t`` for ``rev`` inside entry ``idx``

    If the entry already records the same move, nothing changes.  Otherwise
    the combined move keeps the original old phase (``t1[0]``) and the new
    target (``t[1]``), and the containing range is split around ``rev``.
    """
    r1, t1 = data[idx]
    if t == t1:
        return
    t = (t1[0], t[1])
    if len(r1) == 1:
        # single-revision range: replace it entirely
        data.pop(idx)
        _sortedrange_insert(data, idx, rev, t)
    elif r1[0] == rev:
        # rev is the first revision of the range: shrink from the left
        data[idx] = (range(rev + 1, r1[-1] + 1), t1)
        _sortedrange_insert(data, idx, rev, t)
    elif r1[-1] == rev:
        # rev is the last revision of the range: shrink from the right
        data[idx] = (range(r1[0], rev), t1)
        _sortedrange_insert(data, idx + 1, rev, t)
    else:
        # rev is strictly inside the range: split into three entries
        data[idx : idx + 1] = [
            (range(r1[0], rev), t1),
            (range(rev, rev + 1), t),
            (range(rev + 1, r1[-1] + 1), t1),
        ]
309
311
310
312
def _trackphasechange(data, rev, old, new):
    """add a phase move to the <data> list of ranges

    If data is None, nothing happens.
    """
    if data is None:
        return

    # If data is empty, create a one-revision range and done
    if not data:
        data.insert(0, (range(rev, rev + 1), (old, new)))
        return

    # binary search for the entry containing or adjacent to rev
    low = 0
    high = len(data)
    t = (old, new)
    while low < high:
        mid = (low + high) // 2
        revs = data[mid][0]
        revs_low = revs[0]
        revs_high = revs[-1]

        if rev >= revs_low and rev <= revs_high:
            # rev already covered by this entry: record the extra move
            _sortedrange_split(data, mid, rev, t)
            return

        if revs_low == rev + 1:
            # rev is immediately before this entry
            if mid and data[mid - 1][0][-1] == rev:
                _sortedrange_split(data, mid - 1, rev, t)
            else:
                _sortedrange_insert(data, mid, rev, t)
            return

        if revs_high == rev - 1:
            # rev is immediately after this entry
            if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
                _sortedrange_split(data, mid + 1, rev, t)
            else:
                _sortedrange_insert(data, mid + 1, rev, t)
            return

        if revs_low > rev:
            high = mid
        else:
            low = mid + 1

    if low == len(data):
        data.append((range(rev, rev + 1), t))
        return

    r1, t1 = data[low]
    if r1[0] > rev:
        data.insert(low, (range(rev, rev + 1), t))
    else:
        data.insert(low + 1, (range(rev, rev + 1), t))
365
367
366
368
class phasecache:
    """In-memory cache of the repository's phase information."""

    def __init__(
        self,
        repo: "localrepo.localrepository",
        phasedefaults: Optional["Phasedefaults"],
        _load: bool = True,
    ):
        # ``_load=False`` is used by copy() to build an empty shell that is
        # then populated attribute by attribute.
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self._loadedrevslen = 0
            self._phasesets = None
            self.filterunknown(repo)
            self.opener = repo.svfs
381
383
382 def hasnonpublicphases(self, repo: "localrepo.localrepository") -> bool:
384 def hasnonpublicphases(self, repo: "localrepo.localrepository") -> bool:
383 """detect if there are revisions with non-public phase"""
385 """detect if there are revisions with non-public phase"""
384 repo = repo.unfiltered()
386 repo = repo.unfiltered()
385 cl = repo.changelog
387 cl = repo.changelog
386 if len(cl) >= self._loadedrevslen:
388 if len(cl) >= self._loadedrevslen:
387 self.invalidate()
389 self.invalidate()
388 self.loadphaserevs(repo)
390 self.loadphaserevs(repo)
389 return any(
391 return any(
390 revs for phase, revs in self.phaseroots.items() if phase != public
392 revs for phase, revs in self.phaseroots.items() if phase != public
391 )
393 )
392
394
393 def nonpublicphaseroots(
395 def nonpublicphaseroots(
394 self, repo: "localrepo.localrepository"
396 self, repo: "localrepo.localrepository"
395 ) -> Set[bytes]:
397 ) -> Set[bytes]:
396 """returns the roots of all non-public phases
398 """returns the roots of all non-public phases
397
399
398 The roots are not minimized, so if the secret revisions are
400 The roots are not minimized, so if the secret revisions are
399 descendants of draft revisions, their roots will still be present.
401 descendants of draft revisions, their roots will still be present.
400 """
402 """
401 repo = repo.unfiltered()
403 repo = repo.unfiltered()
402 cl = repo.changelog
404 cl = repo.changelog
403 if len(cl) >= self._loadedrevslen:
405 if len(cl) >= self._loadedrevslen:
404 self.invalidate()
406 self.invalidate()
405 self.loadphaserevs(repo)
407 self.loadphaserevs(repo)
406 return set().union(
408 return set().union(
407 *[
409 *[
408 revs
410 revs
409 for phase, revs in self.phaseroots.items()
411 for phase, revs in self.phaseroots.items()
410 if phase != public
412 if phase != public
411 ]
413 ]
412 )
414 )
413
415
414 def getrevset(
416 def getrevset(
415 self,
417 self,
416 repo: "localrepo.localrepository",
418 repo: "localrepo.localrepository",
417 phases: Iterable[int],
419 phases: Iterable[int],
418 subset: Optional[Any] = None,
420 subset: Optional[Any] = None,
419 ) -> Any:
421 ) -> Any:
420 # TODO: finish typing this
422 # TODO: finish typing this
421 """return a smartset for the given phases"""
423 """return a smartset for the given phases"""
422 self.loadphaserevs(repo) # ensure phase's sets are loaded
424 self.loadphaserevs(repo) # ensure phase's sets are loaded
423 phases = set(phases)
425 phases = set(phases)
424 publicphase = public in phases
426 publicphase = public in phases
425
427
426 if publicphase:
428 if publicphase:
427 # In this case, phases keeps all the *other* phases.
429 # In this case, phases keeps all the *other* phases.
428 phases = set(allphases).difference(phases)
430 phases = set(allphases).difference(phases)
429 if not phases:
431 if not phases:
430 return smartset.fullreposet(repo)
432 return smartset.fullreposet(repo)
431
433
432 # fast path: _phasesets contains the interesting sets,
434 # fast path: _phasesets contains the interesting sets,
433 # might only need a union and post-filtering.
435 # might only need a union and post-filtering.
434 revsneedscopy = False
436 revsneedscopy = False
435 if len(phases) == 1:
437 if len(phases) == 1:
436 [p] = phases
438 [p] = phases
437 revs = self._phasesets[p]
439 revs = self._phasesets[p]
438 revsneedscopy = True # Don't modify _phasesets
440 revsneedscopy = True # Don't modify _phasesets
439 else:
441 else:
440 # revs has the revisions in all *other* phases.
442 # revs has the revisions in all *other* phases.
441 revs = set.union(*[self._phasesets[p] for p in phases])
443 revs = set.union(*[self._phasesets[p] for p in phases])
442
444
443 def _addwdir(wdirsubset, wdirrevs):
445 def _addwdir(wdirsubset, wdirrevs):
444 if wdirrev in wdirsubset and repo[None].phase() in phases:
446 if wdirrev in wdirsubset and repo[None].phase() in phases:
445 if revsneedscopy:
447 if revsneedscopy:
446 wdirrevs = wdirrevs.copy()
448 wdirrevs = wdirrevs.copy()
447 # The working dir would never be in the # cache, but it was in
449 # The working dir would never be in the # cache, but it was in
448 # the subset being filtered for its phase (or filtered out,
450 # the subset being filtered for its phase (or filtered out,
449 # depending on publicphase), so add it to the output to be
451 # depending on publicphase), so add it to the output to be
450 # included (or filtered out).
452 # included (or filtered out).
451 wdirrevs.add(wdirrev)
453 wdirrevs.add(wdirrev)
452 return wdirrevs
454 return wdirrevs
453
455
454 if not publicphase:
456 if not publicphase:
455 if repo.changelog.filteredrevs:
457 if repo.changelog.filteredrevs:
456 revs = revs - repo.changelog.filteredrevs
458 revs = revs - repo.changelog.filteredrevs
457
459
458 if subset is None:
460 if subset is None:
459 return smartset.baseset(revs)
461 return smartset.baseset(revs)
460 else:
462 else:
461 revs = _addwdir(subset, revs)
463 revs = _addwdir(subset, revs)
462 return subset & smartset.baseset(revs)
464 return subset & smartset.baseset(revs)
463 else:
465 else:
464 if subset is None:
466 if subset is None:
465 subset = smartset.fullreposet(repo)
467 subset = smartset.fullreposet(repo)
466
468
467 revs = _addwdir(subset, revs)
469 revs = _addwdir(subset, revs)
468
470
469 if not revs:
471 if not revs:
470 return subset
472 return subset
471 return subset.filter(lambda r: r not in revs)
473 return subset.filter(lambda r: r not in revs)
472
474
473 def copy(self):
475 def copy(self):
474 # Shallow copy meant to ensure isolation in
476 # Shallow copy meant to ensure isolation in
475 # advance/retractboundary(), nothing more.
477 # advance/retractboundary(), nothing more.
476 ph = self.__class__(None, None, _load=False)
478 ph = self.__class__(None, None, _load=False)
477 ph.phaseroots = self.phaseroots.copy()
479 ph.phaseroots = self.phaseroots.copy()
478 ph.dirty = self.dirty
480 ph.dirty = self.dirty
479 ph.opener = self.opener
481 ph.opener = self.opener
480 ph._loadedrevslen = self._loadedrevslen
482 ph._loadedrevslen = self._loadedrevslen
481 ph._phasesets = self._phasesets
483 ph._phasesets = self._phasesets
482 return ph
484 return ph
483
485
484 def replace(self, phcache):
486 def replace(self, phcache):
485 """replace all values in 'self' with content of phcache"""
487 """replace all values in 'self' with content of phcache"""
486 for a in (
488 for a in (
487 'phaseroots',
489 'phaseroots',
488 'dirty',
490 'dirty',
489 'opener',
491 'opener',
490 '_loadedrevslen',
492 '_loadedrevslen',
491 '_phasesets',
493 '_phasesets',
492 ):
494 ):
493 setattr(self, a, getattr(phcache, a))
495 setattr(self, a, getattr(phcache, a))
494
496
495 def _getphaserevsnative(self, repo):
497 def _getphaserevsnative(self, repo):
496 repo = repo.unfiltered()
498 repo = repo.unfiltered()
497 return repo.changelog.computephases(self.phaseroots)
499 return repo.changelog.computephases(self.phaseroots)
498
500
499 def _computephaserevspure(self, repo):
501 def _computephaserevspure(self, repo):
500 repo = repo.unfiltered()
502 repo = repo.unfiltered()
501 cl = repo.changelog
503 cl = repo.changelog
502 self._phasesets = {phase: set() for phase in allphases}
504 self._phasesets = {phase: set() for phase in allphases}
503 lowerroots = set()
505 lowerroots = set()
504 for phase in reversed(trackedphases):
506 for phase in reversed(trackedphases):
505 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
507 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
506 if roots:
508 if roots:
507 ps = set(cl.descendants(roots))
509 ps = set(cl.descendants(roots))
508 for root in roots:
510 for root in roots:
509 ps.add(root)
511 ps.add(root)
510 ps.difference_update(lowerroots)
512 ps.difference_update(lowerroots)
511 lowerroots.update(ps)
513 lowerroots.update(ps)
512 self._phasesets[phase] = ps
514 self._phasesets[phase] = ps
513 self._loadedrevslen = len(cl)
515 self._loadedrevslen = len(cl)
514
516
515 def loadphaserevs(self, repo: "localrepo.localrepository") -> None:
517 def loadphaserevs(self, repo: "localrepo.localrepository") -> None:
516 """ensure phase information is loaded in the object"""
518 """ensure phase information is loaded in the object"""
517 if self._phasesets is None:
519 if self._phasesets is None:
518 try:
520 try:
519 res = self._getphaserevsnative(repo)
521 res = self._getphaserevsnative(repo)
520 self._loadedrevslen, self._phasesets = res
522 self._loadedrevslen, self._phasesets = res
521 except AttributeError:
523 except AttributeError:
522 self._computephaserevspure(repo)
524 self._computephaserevspure(repo)
523
525
524 def invalidate(self):
526 def invalidate(self):
525 self._loadedrevslen = 0
527 self._loadedrevslen = 0
526 self._phasesets = None
528 self._phasesets = None
527
529
528 def phase(self, repo: "localrepo.localrepository", rev: int) -> int:
530 def phase(self, repo: "localrepo.localrepository", rev: int) -> int:
529 # We need a repo argument here to be able to build _phasesets
531 # We need a repo argument here to be able to build _phasesets
530 # if necessary. The repository instance is not stored in
532 # if necessary. The repository instance is not stored in
531 # phasecache to avoid reference cycles. The changelog instance
533 # phasecache to avoid reference cycles. The changelog instance
532 # is not stored because it is a filecache() property and can
534 # is not stored because it is a filecache() property and can
533 # be replaced without us being notified.
535 # be replaced without us being notified.
534 if rev == nullrev:
536 if rev == nullrev:
535 return public
537 return public
536 if rev < nullrev:
538 if rev < nullrev:
537 raise ValueError(_(b'cannot lookup negative revision'))
539 raise ValueError(_(b'cannot lookup negative revision'))
538 if rev >= self._loadedrevslen:
540 if rev >= self._loadedrevslen:
539 self.invalidate()
541 self.invalidate()
540 self.loadphaserevs(repo)
542 self.loadphaserevs(repo)
541 for phase in trackedphases:
543 for phase in trackedphases:
542 if rev in self._phasesets[phase]:
544 if rev in self._phasesets[phase]:
543 return phase
545 return phase
544 return public
546 return public
545
547
546 def write(self):
548 def write(self):
547 if not self.dirty:
549 if not self.dirty:
548 return
550 return
549 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
551 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
550 try:
552 try:
551 self._write(f)
553 self._write(f)
552 finally:
554 finally:
553 f.close()
555 f.close()
554
556
555 def _write(self, fp):
557 def _write(self, fp):
556 for phase, roots in self.phaseroots.items():
558 for phase, roots in self.phaseroots.items():
557 for h in sorted(roots):
559 for h in sorted(roots):
558 fp.write(b'%i %s\n' % (phase, hex(h)))
560 fp.write(b'%i %s\n' % (phase, hex(h)))
559 self.dirty = False
561 self.dirty = False
560
562
561 def _updateroots(self, phase, newroots, tr):
563 def _updateroots(self, phase, newroots, tr):
562 self.phaseroots[phase] = newroots
564 self.phaseroots[phase] = newroots
563 self.invalidate()
565 self.invalidate()
564 self.dirty = True
566 self.dirty = True
565
567
566 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
568 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
567 tr.hookargs[b'phases_moved'] = b'1'
569 tr.hookargs[b'phases_moved'] = b'1'
568
570
569 def registernew(self, repo, tr, targetphase, revs):
571 def registernew(self, repo, tr, targetphase, revs):
570 repo = repo.unfiltered()
572 repo = repo.unfiltered()
571 self._retractboundary(repo, tr, targetphase, [], revs=revs)
573 self._retractboundary(repo, tr, targetphase, [], revs=revs)
572 if tr is not None and b'phases' in tr.changes:
574 if tr is not None and b'phases' in tr.changes:
573 phasetracking = tr.changes[b'phases']
575 phasetracking = tr.changes[b'phases']
574 phase = self.phase
576 phase = self.phase
575 for rev in sorted(revs):
577 for rev in sorted(revs):
576 revphase = phase(repo, rev)
578 revphase = phase(repo, rev)
577 _trackphasechange(phasetracking, rev, None, revphase)
579 _trackphasechange(phasetracking, rev, None, revphase)
578 repo.invalidatevolatilesets()
580 repo.invalidatevolatilesets()
579
581
580 def advanceboundary(
582 def advanceboundary(
581 self, repo, tr, targetphase, nodes, revs=None, dryrun=None
583 self, repo, tr, targetphase, nodes, revs=None, dryrun=None
582 ):
584 ):
583 """Set all 'nodes' to phase 'targetphase'
585 """Set all 'nodes' to phase 'targetphase'
584
586
585 Nodes with a phase lower than 'targetphase' are not affected.
587 Nodes with a phase lower than 'targetphase' are not affected.
586
588
587 If dryrun is True, no actions will be performed
589 If dryrun is True, no actions will be performed
588
590
589 Returns a set of revs whose phase is changed or should be changed
591 Returns a set of revs whose phase is changed or should be changed
590 """
592 """
591 # Be careful to preserve shallow-copied values: do not update
593 # Be careful to preserve shallow-copied values: do not update
592 # phaseroots values, replace them.
594 # phaseroots values, replace them.
593 if revs is None:
595 if revs is None:
594 revs = []
596 revs = []
595 if tr is None:
597 if tr is None:
596 phasetracking = None
598 phasetracking = None
597 else:
599 else:
598 phasetracking = tr.changes.get(b'phases')
600 phasetracking = tr.changes.get(b'phases')
599
601
600 repo = repo.unfiltered()
602 repo = repo.unfiltered()
601 revs = [repo[n].rev() for n in nodes] + [r for r in revs]
603 revs = [repo[n].rev() for n in nodes] + [r for r in revs]
602
604
603 changes = set() # set of revisions to be changed
605 changes = set() # set of revisions to be changed
604 delroots = [] # set of root deleted by this path
606 delroots = [] # set of root deleted by this path
605 for phase in (phase for phase in allphases if phase > targetphase):
607 for phase in (phase for phase in allphases if phase > targetphase):
606 # filter nodes that are not in a compatible phase already
608 # filter nodes that are not in a compatible phase already
607 revs = [rev for rev in revs if self.phase(repo, rev) >= phase]
609 revs = [rev for rev in revs if self.phase(repo, rev) >= phase]
608 if not revs:
610 if not revs:
609 break # no roots to move anymore
611 break # no roots to move anymore
610
612
611 olds = self.phaseroots[phase]
613 olds = self.phaseroots[phase]
612
614
613 affected = repo.revs(b'%ln::%ld', olds, revs)
615 affected = repo.revs(b'%ln::%ld', olds, revs)
614 changes.update(affected)
616 changes.update(affected)
615 if dryrun:
617 if dryrun:
616 continue
618 continue
617 for r in affected:
619 for r in affected:
618 _trackphasechange(
620 _trackphasechange(
619 phasetracking, r, self.phase(repo, r), targetphase
621 phasetracking, r, self.phase(repo, r), targetphase
620 )
622 )
621
623
622 roots = {
624 roots = {
623 ctx.node()
625 ctx.node()
624 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
626 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
625 }
627 }
626 if olds != roots:
628 if olds != roots:
627 self._updateroots(phase, roots, tr)
629 self._updateroots(phase, roots, tr)
628 # some roots may need to be declared for lower phases
630 # some roots may need to be declared for lower phases
629 delroots.extend(olds - roots)
631 delroots.extend(olds - roots)
630 if not dryrun:
632 if not dryrun:
631 # declare deleted root in the target phase
633 # declare deleted root in the target phase
632 if targetphase != 0:
634 if targetphase != 0:
633 self._retractboundary(repo, tr, targetphase, delroots)
635 self._retractboundary(repo, tr, targetphase, delroots)
634 repo.invalidatevolatilesets()
636 repo.invalidatevolatilesets()
635 return changes
637 return changes
636
638
637 def retractboundary(self, repo, tr, targetphase, nodes):
639 def retractboundary(self, repo, tr, targetphase, nodes):
638 oldroots = {
640 oldroots = {
639 phase: revs
641 phase: revs
640 for phase, revs in self.phaseroots.items()
642 for phase, revs in self.phaseroots.items()
641 if phase <= targetphase
643 if phase <= targetphase
642 }
644 }
643 if tr is None:
645 if tr is None:
644 phasetracking = None
646 phasetracking = None
645 else:
647 else:
646 phasetracking = tr.changes.get(b'phases')
648 phasetracking = tr.changes.get(b'phases')
647 repo = repo.unfiltered()
649 repo = repo.unfiltered()
648 if (
650 if (
649 self._retractboundary(repo, tr, targetphase, nodes)
651 self._retractboundary(repo, tr, targetphase, nodes)
650 and phasetracking is not None
652 and phasetracking is not None
651 ):
653 ):
652
654
653 # find the affected revisions
655 # find the affected revisions
654 new = self.phaseroots[targetphase]
656 new = self.phaseroots[targetphase]
655 old = oldroots[targetphase]
657 old = oldroots[targetphase]
656 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
658 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
657
659
658 # find the phase of the affected revision
660 # find the phase of the affected revision
659 for phase in range(targetphase, -1, -1):
661 for phase in range(targetphase, -1, -1):
660 if phase:
662 if phase:
661 roots = oldroots.get(phase, [])
663 roots = oldroots.get(phase, [])
662 revs = set(repo.revs(b'%ln::%ld', roots, affected))
664 revs = set(repo.revs(b'%ln::%ld', roots, affected))
663 affected -= revs
665 affected -= revs
664 else: # public phase
666 else: # public phase
665 revs = affected
667 revs = affected
666 for r in sorted(revs):
668 for r in sorted(revs):
667 _trackphasechange(phasetracking, r, phase, targetphase)
669 _trackphasechange(phasetracking, r, phase, targetphase)
668 repo.invalidatevolatilesets()
670 repo.invalidatevolatilesets()
669
671
670 def _retractboundary(self, repo, tr, targetphase, nodes, revs=None):
672 def _retractboundary(self, repo, tr, targetphase, nodes, revs=None):
671 # Be careful to preserve shallow-copied values: do not update
673 # Be careful to preserve shallow-copied values: do not update
672 # phaseroots values, replace them.
674 # phaseroots values, replace them.
673 if revs is None:
675 if revs is None:
674 revs = []
676 revs = []
675 if (
677 if (
676 targetphase == internal
678 targetphase == internal
677 and not supportinternal(repo)
679 and not supportinternal(repo)
678 or targetphase == archived
680 or targetphase == archived
679 and not supportarchived(repo)
681 and not supportarchived(repo)
680 ):
682 ):
681 name = phasenames[targetphase]
683 name = phasenames[targetphase]
682 msg = b'this repository does not support the %s phase' % name
684 msg = b'this repository does not support the %s phase' % name
683 raise error.ProgrammingError(msg)
685 raise error.ProgrammingError(msg)
684
686
685 repo = repo.unfiltered()
687 repo = repo.unfiltered()
686 torev = repo.changelog.rev
688 torev = repo.changelog.rev
687 tonode = repo.changelog.node
689 tonode = repo.changelog.node
688 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
690 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
689 finalroots = oldroots = set(currentroots)
691 finalroots = oldroots = set(currentroots)
690 newroots = [torev(node) for node in nodes] + [r for r in revs]
692 newroots = [torev(node) for node in nodes] + [r for r in revs]
691 newroots = [
693 newroots = [
692 rev for rev in newroots if self.phase(repo, rev) < targetphase
694 rev for rev in newroots if self.phase(repo, rev) < targetphase
693 ]
695 ]
694
696
695 if newroots:
697 if newroots:
696 if nullrev in newroots:
698 if nullrev in newroots:
697 raise error.Abort(_(b'cannot change null revision phase'))
699 raise error.Abort(_(b'cannot change null revision phase'))
698 currentroots.update(newroots)
700 currentroots.update(newroots)
699
701
700 # Only compute new roots for revs above the roots that are being
702 # Only compute new roots for revs above the roots that are being
701 # retracted.
703 # retracted.
702 minnewroot = min(newroots)
704 minnewroot = min(newroots)
703 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
705 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
704 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
706 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
705
707
706 finalroots = {rev for rev in currentroots if rev < minnewroot}
708 finalroots = {rev for rev in currentroots if rev < minnewroot}
707 finalroots.update(updatedroots)
709 finalroots.update(updatedroots)
708 if finalroots != oldroots:
710 if finalroots != oldroots:
709 self._updateroots(
711 self._updateroots(
710 targetphase, {tonode(rev) for rev in finalroots}, tr
712 targetphase, {tonode(rev) for rev in finalroots}, tr
711 )
713 )
712 return True
714 return True
713 return False
715 return False
714
716
715 def filterunknown(self, repo: "localrepo.localrepository") -> None:
717 def filterunknown(self, repo: "localrepo.localrepository") -> None:
716 """remove unknown nodes from the phase boundary
718 """remove unknown nodes from the phase boundary
717
719
718 Nothing is lost as unknown nodes only hold data for their descendants.
720 Nothing is lost as unknown nodes only hold data for their descendants.
719 """
721 """
720 filtered = False
722 filtered = False
721 has_node = repo.changelog.index.has_node # to filter unknown nodes
723 has_node = repo.changelog.index.has_node # to filter unknown nodes
722 for phase, nodes in self.phaseroots.items():
724 for phase, nodes in self.phaseroots.items():
723 missing = sorted(node for node in nodes if not has_node(node))
725 missing = sorted(node for node in nodes if not has_node(node))
724 if missing:
726 if missing:
725 for mnode in missing:
727 for mnode in missing:
726 repo.ui.debug(
728 repo.ui.debug(
727 b'removing unknown node %s from %i-phase boundary\n'
729 b'removing unknown node %s from %i-phase boundary\n'
728 % (short(mnode), phase)
730 % (short(mnode), phase)
729 )
731 )
730 nodes.symmetric_difference_update(missing)
732 nodes.symmetric_difference_update(missing)
731 filtered = True
733 filtered = True
732 if filtered:
734 if filtered:
733 self.dirty = True
735 self.dirty = True
734 # filterunknown is called by repo.destroyed, we may have no changes in
736 # filterunknown is called by repo.destroyed, we may have no changes in
735 # root but _phasesets contents is certainly invalid (or at least we
737 # root but _phasesets contents is certainly invalid (or at least we
736 # have not proper way to check that). related to issue 3858.
738 # have not proper way to check that). related to issue 3858.
737 #
739 #
738 # The other caller is __init__ that have no _phasesets initialized
740 # The other caller is __init__ that have no _phasesets initialized
739 # anyway. If this change we should consider adding a dedicated
741 # anyway. If this change we should consider adding a dedicated
740 # "destroyed" function to phasecache or a proper cache key mechanism
742 # "destroyed" function to phasecache or a proper cache key mechanism
741 # (see branchmap one)
743 # (see branchmap one)
742 self.invalidate()
744 self.invalidate()
743
745
744
746
745 def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None):
747 def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None):
746 """Add nodes to a phase changing other nodes phases if necessary.
748 """Add nodes to a phase changing other nodes phases if necessary.
747
749
748 This function move boundary *forward* this means that all nodes
750 This function move boundary *forward* this means that all nodes
749 are set in the target phase or kept in a *lower* phase.
751 are set in the target phase or kept in a *lower* phase.
750
752
751 Simplify boundary to contains phase roots only.
753 Simplify boundary to contains phase roots only.
752
754
753 If dryrun is True, no actions will be performed
755 If dryrun is True, no actions will be performed
754
756
755 Returns a set of revs whose phase is changed or should be changed
757 Returns a set of revs whose phase is changed or should be changed
756 """
758 """
757 if revs is None:
759 if revs is None:
758 revs = []
760 revs = []
759 phcache = repo._phasecache.copy()
761 phcache = repo._phasecache.copy()
760 changes = phcache.advanceboundary(
762 changes = phcache.advanceboundary(
761 repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun
763 repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun
762 )
764 )
763 if not dryrun:
765 if not dryrun:
764 repo._phasecache.replace(phcache)
766 repo._phasecache.replace(phcache)
765 return changes
767 return changes
766
768
767
769
768 def retractboundary(repo, tr, targetphase, nodes):
770 def retractboundary(repo, tr, targetphase, nodes):
769 """Set nodes back to a phase changing other nodes phases if
771 """Set nodes back to a phase changing other nodes phases if
770 necessary.
772 necessary.
771
773
772 This function move boundary *backward* this means that all nodes
774 This function move boundary *backward* this means that all nodes
773 are set in the target phase or kept in a *higher* phase.
775 are set in the target phase or kept in a *higher* phase.
774
776
775 Simplify boundary to contains phase roots only."""
777 Simplify boundary to contains phase roots only."""
776 phcache = repo._phasecache.copy()
778 phcache = repo._phasecache.copy()
777 phcache.retractboundary(repo, tr, targetphase, nodes)
779 phcache.retractboundary(repo, tr, targetphase, nodes)
778 repo._phasecache.replace(phcache)
780 repo._phasecache.replace(phcache)
779
781
780
782
781 def registernew(repo, tr, targetphase, revs):
783 def registernew(repo, tr, targetphase, revs):
782 """register a new revision and its phase
784 """register a new revision and its phase
783
785
784 Code adding revisions to the repository should use this function to
786 Code adding revisions to the repository should use this function to
785 set new changeset in their target phase (or higher).
787 set new changeset in their target phase (or higher).
786 """
788 """
787 phcache = repo._phasecache.copy()
789 phcache = repo._phasecache.copy()
788 phcache.registernew(repo, tr, targetphase, revs)
790 phcache.registernew(repo, tr, targetphase, revs)
789 repo._phasecache.replace(phcache)
791 repo._phasecache.replace(phcache)
790
792
791
793
792 def listphases(repo: "localrepo.localrepository") -> Dict[bytes, bytes]:
794 def listphases(repo: "localrepo.localrepository") -> Dict[bytes, bytes]:
793 """List phases root for serialization over pushkey"""
795 """List phases root for serialization over pushkey"""
794 # Use ordered dictionary so behavior is deterministic.
796 # Use ordered dictionary so behavior is deterministic.
795 keys = util.sortdict()
797 keys = util.sortdict()
796 value = b'%i' % draft
798 value = b'%i' % draft
797 cl = repo.unfiltered().changelog
799 cl = repo.unfiltered().changelog
798 for root in repo._phasecache.phaseroots[draft]:
800 for root in repo._phasecache.phaseroots[draft]:
799 if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
801 if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
800 keys[hex(root)] = value
802 keys[hex(root)] = value
801
803
802 if repo.publishing():
804 if repo.publishing():
803 # Add an extra data to let remote know we are a publishing
805 # Add an extra data to let remote know we are a publishing
804 # repo. Publishing repo can't just pretend they are old repo.
806 # repo. Publishing repo can't just pretend they are old repo.
805 # When pushing to a publishing repo, the client still need to
807 # When pushing to a publishing repo, the client still need to
806 # push phase boundary
808 # push phase boundary
807 #
809 #
808 # Push do not only push changeset. It also push phase data.
810 # Push do not only push changeset. It also push phase data.
809 # New phase data may apply to common changeset which won't be
811 # New phase data may apply to common changeset which won't be
810 # push (as they are common). Here is a very simple example:
812 # push (as they are common). Here is a very simple example:
811 #
813 #
812 # 1) repo A push changeset X as draft to repo B
814 # 1) repo A push changeset X as draft to repo B
813 # 2) repo B make changeset X public
815 # 2) repo B make changeset X public
814 # 3) repo B push to repo A. X is not pushed but the data that
816 # 3) repo B push to repo A. X is not pushed but the data that
815 # X as now public should
817 # X as now public should
816 #
818 #
817 # The server can't handle it on it's own as it has no idea of
819 # The server can't handle it on it's own as it has no idea of
818 # client phase data.
820 # client phase data.
819 keys[b'publishing'] = b'True'
821 keys[b'publishing'] = b'True'
820 return keys
822 return keys
821
823
822
824
823 def pushphase(
825 def pushphase(
824 repo: "localrepo.localrepository",
826 repo: "localrepo.localrepository",
825 nhex: bytes,
827 nhex: bytes,
826 oldphasestr: bytes,
828 oldphasestr: bytes,
827 newphasestr: bytes,
829 newphasestr: bytes,
828 ) -> bool:
830 ) -> bool:
829 """List phases root for serialization over pushkey"""
831 """List phases root for serialization over pushkey"""
830 repo = repo.unfiltered()
832 repo = repo.unfiltered()
831 with repo.lock():
833 with repo.lock():
832 currentphase = repo[nhex].phase()
834 currentphase = repo[nhex].phase()
833 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
835 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
834 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
836 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
835 if currentphase == oldphase and newphase < oldphase:
837 if currentphase == oldphase and newphase < oldphase:
836 with repo.transaction(b'pushkey-phase') as tr:
838 with repo.transaction(b'pushkey-phase') as tr:
837 advanceboundary(repo, tr, newphase, [bin(nhex)])
839 advanceboundary(repo, tr, newphase, [bin(nhex)])
838 return True
840 return True
839 elif currentphase == newphase:
841 elif currentphase == newphase:
840 # raced, but got correct result
842 # raced, but got correct result
841 return True
843 return True
842 else:
844 else:
843 return False
845 return False
844
846
845
847
846 def subsetphaseheads(repo, subset):
848 def subsetphaseheads(repo, subset):
847 """Finds the phase heads for a subset of a history
849 """Finds the phase heads for a subset of a history
848
850
849 Returns a list indexed by phase number where each item is a list of phase
851 Returns a list indexed by phase number where each item is a list of phase
850 head nodes.
852 head nodes.
851 """
853 """
852 cl = repo.changelog
854 cl = repo.changelog
853
855
854 headsbyphase = {i: [] for i in allphases}
856 headsbyphase = {i: [] for i in allphases}
855 for phase in allphases:
857 for phase in allphases:
856 revset = b"heads(%%ln & _phase(%d))" % phase
858 revset = b"heads(%%ln & _phase(%d))" % phase
857 headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
859 headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
858 return headsbyphase
860 return headsbyphase
859
861
860
862
861 def updatephases(repo, trgetter, headsbyphase):
863 def updatephases(repo, trgetter, headsbyphase):
862 """Updates the repo with the given phase heads"""
864 """Updates the repo with the given phase heads"""
863 # Now advance phase boundaries of all phases
865 # Now advance phase boundaries of all phases
864 #
866 #
865 # run the update (and fetch transaction) only if there are actually things
867 # run the update (and fetch transaction) only if there are actually things
866 # to update. This avoid creating empty transaction during no-op operation.
868 # to update. This avoid creating empty transaction during no-op operation.
867
869
868 for phase in allphases:
870 for phase in allphases:
869 revset = b'%ln - _phase(%s)'
871 revset = b'%ln - _phase(%s)'
870 heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
872 heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
871 if heads:
873 if heads:
872 advanceboundary(repo, trgetter(), phase, heads)
874 advanceboundary(repo, trgetter(), phase, heads)
873
875
874
876
875 def analyzeremotephases(repo, subset, roots):
877 def analyzeremotephases(repo, subset, roots):
876 """Compute phases heads and root in a subset of node from root dict
878 """Compute phases heads and root in a subset of node from root dict
877
879
878 * subset is heads of the subset
880 * subset is heads of the subset
879 * roots is {<nodeid> => phase} mapping. key and value are string.
881 * roots is {<nodeid> => phase} mapping. key and value are string.
880
882
881 Accept unknown element input
883 Accept unknown element input
882 """
884 """
883 repo = repo.unfiltered()
885 repo = repo.unfiltered()
884 # build list from dictionary
886 # build list from dictionary
885 draftroots = []
887 draftroots = []
886 has_node = repo.changelog.index.has_node # to filter unknown nodes
888 has_node = repo.changelog.index.has_node # to filter unknown nodes
887 for nhex, phase in roots.items():
889 for nhex, phase in roots.items():
888 if nhex == b'publishing': # ignore data related to publish option
890 if nhex == b'publishing': # ignore data related to publish option
889 continue
891 continue
890 node = bin(nhex)
892 node = bin(nhex)
891 phase = int(phase)
893 phase = int(phase)
892 if phase == public:
894 if phase == public:
893 if node != repo.nullid:
895 if node != repo.nullid:
894 repo.ui.warn(
896 repo.ui.warn(
895 _(
897 _(
896 b'ignoring inconsistent public root'
898 b'ignoring inconsistent public root'
897 b' from remote: %s\n'
899 b' from remote: %s\n'
898 )
900 )
899 % nhex
901 % nhex
900 )
902 )
901 elif phase == draft:
903 elif phase == draft:
902 if has_node(node):
904 if has_node(node):
903 draftroots.append(node)
905 draftroots.append(node)
904 else:
906 else:
905 repo.ui.warn(
907 repo.ui.warn(
906 _(b'ignoring unexpected root from remote: %i %s\n')
908 _(b'ignoring unexpected root from remote: %i %s\n')
907 % (phase, nhex)
909 % (phase, nhex)
908 )
910 )
909 # compute heads
911 # compute heads
910 publicheads = newheads(repo, subset, draftroots)
912 publicheads = newheads(repo, subset, draftroots)
911 return publicheads, draftroots
913 return publicheads, draftroots
912
914
913
915
914 class remotephasessummary:
916 class remotephasessummary:
915 """summarize phase information on the remote side
917 """summarize phase information on the remote side
916
918
917 :publishing: True is the remote is publishing
919 :publishing: True is the remote is publishing
918 :publicheads: list of remote public phase heads (nodes)
920 :publicheads: list of remote public phase heads (nodes)
919 :draftheads: list of remote draft phase heads (nodes)
921 :draftheads: list of remote draft phase heads (nodes)
920 :draftroots: list of remote draft phase root (nodes)
922 :draftroots: list of remote draft phase root (nodes)
921 """
923 """
922
924
923 def __init__(self, repo, remotesubset, remoteroots):
925 def __init__(self, repo, remotesubset, remoteroots):
924 unfi = repo.unfiltered()
926 unfi = repo.unfiltered()
925 self._allremoteroots = remoteroots
927 self._allremoteroots = remoteroots
926
928
927 self.publishing = remoteroots.get(b'publishing', False)
929 self.publishing = remoteroots.get(b'publishing', False)
928
930
929 ana = analyzeremotephases(repo, remotesubset, remoteroots)
931 ana = analyzeremotephases(repo, remotesubset, remoteroots)
930 self.publicheads, self.draftroots = ana
932 self.publicheads, self.draftroots = ana
931 # Get the list of all "heads" revs draft on remote
933 # Get the list of all "heads" revs draft on remote
932 dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
934 dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
933 self.draftheads = [c.node() for c in dheads]
935 self.draftheads = [c.node() for c in dheads]
934
936
935
937
936 def newheads(repo, heads, roots):
938 def newheads(repo, heads, roots):
937 """compute new head of a subset minus another
939 """compute new head of a subset minus another
938
940
939 * `heads`: define the first subset
941 * `heads`: define the first subset
940 * `roots`: define the second we subtract from the first"""
942 * `roots`: define the second we subtract from the first"""
941 # prevent an import cycle
943 # prevent an import cycle
942 # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
944 # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
943 from . import dagop
945 from . import dagop
944
946
945 repo = repo.unfiltered()
947 repo = repo.unfiltered()
946 cl = repo.changelog
948 cl = repo.changelog
947 rev = cl.index.get_rev
949 rev = cl.index.get_rev
948 if not roots:
950 if not roots:
949 return heads
951 return heads
950 if not heads or heads == [repo.nullid]:
952 if not heads or heads == [repo.nullid]:
951 return []
953 return []
952 # The logic operated on revisions, convert arguments early for convenience
954 # The logic operated on revisions, convert arguments early for convenience
953 new_heads = {rev(n) for n in heads if n != repo.nullid}
955 new_heads = {rev(n) for n in heads if n != repo.nullid}
954 roots = [rev(n) for n in roots]
956 roots = [rev(n) for n in roots]
955 # compute the area we need to remove
957 # compute the area we need to remove
956 affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
958 affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
957 # heads in the area are no longer heads
959 # heads in the area are no longer heads
958 new_heads.difference_update(affected_zone)
960 new_heads.difference_update(affected_zone)
959 # revisions in the area have children outside of it,
961 # revisions in the area have children outside of it,
960 # They might be new heads
962 # They might be new heads
961 candidates = repo.revs(
963 candidates = repo.revs(
962 b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
964 b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
963 )
965 )
964 candidates -= affected_zone
966 candidates -= affected_zone
965 if new_heads or candidates:
967 if new_heads or candidates:
966 # remove candidate that are ancestors of other heads
968 # remove candidate that are ancestors of other heads
967 new_heads.update(candidates)
969 new_heads.update(candidates)
968 prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
970 prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
969 pruned = dagop.reachableroots(repo, candidates, prunestart)
971 pruned = dagop.reachableroots(repo, candidates, prunestart)
970 new_heads.difference_update(pruned)
972 new_heads.difference_update(pruned)
971
973
972 return pycompat.maplist(cl.node, sorted(new_heads))
974 return pycompat.maplist(cl.node, sorted(new_heads))
973
975
974
976
975 def newcommitphase(ui: "uimod.ui") -> int:
977 def newcommitphase(ui: "uimod.ui") -> int:
976 """helper to get the target phase of new commit
978 """helper to get the target phase of new commit
977
979
978 Handle all possible values for the phases.new-commit options.
980 Handle all possible values for the phases.new-commit options.
979
981
980 """
982 """
981 v = ui.config(b'phases', b'new-commit')
983 v = ui.config(b'phases', b'new-commit')
982 try:
984 try:
983 return phasenumber2[v]
985 return phasenumber2[v]
984 except KeyError:
986 except KeyError:
985 raise error.ConfigError(
987 raise error.ConfigError(
986 _(b"phases.new-commit: not a valid phase name ('%s')") % v
988 _(b"phases.new-commit: not a valid phase name ('%s')") % v
987 )
989 )
988
990
989
991
990 def hassecret(repo: "localrepo.localrepository") -> bool:
992 def hassecret(repo: "localrepo.localrepository") -> bool:
991 """utility function that check if a repo have any secret changeset."""
993 """utility function that check if a repo have any secret changeset."""
992 return bool(repo._phasecache.phaseroots[secret])
994 return bool(repo._phasecache.phaseroots[secret])
993
995
994
996
995 def preparehookargs(
997 def preparehookargs(
996 node: bytes,
998 node: bytes,
997 old: Optional[int],
999 old: Optional[int],
998 new: Optional[int],
1000 new: Optional[int],
999 ) -> Dict[bytes, bytes]:
1001 ) -> Dict[bytes, bytes]:
1000 if old is None:
1002 if old is None:
1001 old = b''
1003 old = b''
1002 else:
1004 else:
1003 old = phasenames[old]
1005 old = phasenames[old]
1004 return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
1006 return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
@@ -1,1133 +1,1133 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import random
8 import random
9
9
10 from typing import (
10 from typing import (
11 List,
11 List,
12 Type,
12 Type,
13 )
13 )
14
14
15 from ..i18n import _
15 from ..i18n import _
16 from .. import (
16 from .. import (
17 error,
17 error,
18 localrepo,
18 localrepo,
19 requirements,
19 requirements,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
24 from ..utils import compression
24 from ..utils import compression
25
25
26 # keeps pyflakes happy
26 # keeps pyflakes happy
27 assert [
27 assert [
28 List,
28 List,
29 Type,
29 Type,
30 ]
30 ]
31
31
32 # list of requirements that request a clone of all revlog if added/removed
32 # list of requirements that request a clone of all revlog if added/removed
33 RECLONES_REQUIREMENTS = {
33 RECLONES_REQUIREMENTS = {
34 requirements.GENERALDELTA_REQUIREMENT,
34 requirements.GENERALDELTA_REQUIREMENT,
35 requirements.SPARSEREVLOG_REQUIREMENT,
35 requirements.SPARSEREVLOG_REQUIREMENT,
36 requirements.REVLOGV2_REQUIREMENT,
36 requirements.REVLOGV2_REQUIREMENT,
37 requirements.CHANGELOGV2_REQUIREMENT,
37 requirements.CHANGELOGV2_REQUIREMENT,
38 }
38 }
39
39
40
40
41 def preservedrequirements(repo):
41 def preservedrequirements(repo):
42 preserved = {
42 preserved = {
43 requirements.SHARED_REQUIREMENT,
43 requirements.SHARED_REQUIREMENT,
44 requirements.NARROW_REQUIREMENT,
44 requirements.NARROW_REQUIREMENT,
45 }
45 }
46 return preserved & repo.requirements
46 return preserved & repo.requirements
47
47
48
48
49 FORMAT_VARIANT = b'deficiency'
49 FORMAT_VARIANT = b'deficiency'
50 OPTIMISATION = b'optimization'
50 OPTIMISATION = b'optimization'
51
51
52
52
53 class improvement:
53 class improvement:
54 """Represents an improvement that can be made as part of an upgrade."""
54 """Represents an improvement that can be made as part of an upgrade."""
55
55
56 ### The following attributes should be defined for each subclass:
56 ### The following attributes should be defined for each subclass:
57
57
58 # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
58 # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
59 # A format variant is where we change the storage format. Not all format
59 # A format variant is where we change the storage format. Not all format
60 # variant changes are an obvious problem.
60 # variant changes are an obvious problem.
61 # An optimization is an action (sometimes optional) that
61 # An optimization is an action (sometimes optional) that
62 # can be taken to further improve the state of the repository.
62 # can be taken to further improve the state of the repository.
63 type = None
63 type = None
64
64
65 # machine-readable string uniquely identifying this improvement. it will be
65 # machine-readable string uniquely identifying this improvement. it will be
66 # mapped to an action later in the upgrade process.
66 # mapped to an action later in the upgrade process.
67 name = None
67 name = None
68
68
69 # message intended for humans explaining the improvement in more detail,
69 # message intended for humans explaining the improvement in more detail,
70 # including the implications of it ``FORMAT_VARIANT`` types, should be
70 # including the implications of it ``FORMAT_VARIANT`` types, should be
71 # worded
71 # worded
72 # in the present tense.
72 # in the present tense.
73 description = None
73 description = None
74
74
75 # message intended for humans explaining what an upgrade addressing this
75 # message intended for humans explaining what an upgrade addressing this
76 # issue will do. should be worded in the future tense.
76 # issue will do. should be worded in the future tense.
77 upgrademessage = None
77 upgrademessage = None
78
78
79 # value of current Mercurial default for new repository
79 # value of current Mercurial default for new repository
80 default = None
80 default = None
81
81
82 # Message intended for humans which will be shown post an upgrade
82 # Message intended for humans which will be shown post an upgrade
83 # operation when the improvement will be added
83 # operation when the improvement will be added
84 postupgrademessage = None
84 postupgrademessage = None
85
85
86 # Message intended for humans which will be shown post an upgrade
86 # Message intended for humans which will be shown post an upgrade
87 # operation in which this improvement was removed
87 # operation in which this improvement was removed
88 postdowngrademessage = None
88 postdowngrademessage = None
89
89
90 # By default we assume that every improvement touches requirements and all revlogs
90 # By default we assume that every improvement touches requirements and all revlogs
91
91
92 # Whether this improvement touches filelogs
92 # Whether this improvement touches filelogs
93 touches_filelogs = True
93 touches_filelogs = True
94
94
95 # Whether this improvement touches manifests
95 # Whether this improvement touches manifests
96 touches_manifests = True
96 touches_manifests = True
97
97
98 # Whether this improvement touches changelog
98 # Whether this improvement touches changelog
99 touches_changelog = True
99 touches_changelog = True
100
100
101 # Whether this improvement changes repository requirements
101 # Whether this improvement changes repository requirements
102 touches_requirements = True
102 touches_requirements = True
103
103
104 # Whether this improvement touches the dirstate
104 # Whether this improvement touches the dirstate
105 touches_dirstate = False
105 touches_dirstate = False
106
106
107 # Can this action be run on a share instead of its mains repository
107 # Can this action be run on a share instead of its mains repository
108 compatible_with_share = False
108 compatible_with_share = False
109
109
110
110
111 allformatvariant = [] # type: List[Type['formatvariant']]
111 allformatvariant: List[Type['formatvariant']] = []
112
112
113
113
114 def registerformatvariant(cls):
114 def registerformatvariant(cls):
115 allformatvariant.append(cls)
115 allformatvariant.append(cls)
116 return cls
116 return cls
117
117
118
118
119 class formatvariant(improvement):
119 class formatvariant(improvement):
120 """an improvement subclass dedicated to repository format"""
120 """an improvement subclass dedicated to repository format"""
121
121
122 type = FORMAT_VARIANT
122 type = FORMAT_VARIANT
123
123
124 @staticmethod
124 @staticmethod
125 def fromrepo(repo):
125 def fromrepo(repo):
126 """current value of the variant in the repository"""
126 """current value of the variant in the repository"""
127 raise NotImplementedError()
127 raise NotImplementedError()
128
128
129 @staticmethod
129 @staticmethod
130 def fromconfig(repo):
130 def fromconfig(repo):
131 """current value of the variant in the configuration"""
131 """current value of the variant in the configuration"""
132 raise NotImplementedError()
132 raise NotImplementedError()
133
133
134
134
135 class requirementformatvariant(formatvariant):
135 class requirementformatvariant(formatvariant):
136 """formatvariant based on a 'requirement' name.
136 """formatvariant based on a 'requirement' name.
137
137
138 Many format variant are controlled by a 'requirement'. We define a small
138 Many format variant are controlled by a 'requirement'. We define a small
139 subclass to factor the code.
139 subclass to factor the code.
140 """
140 """
141
141
142 # the requirement that control this format variant
142 # the requirement that control this format variant
143 _requirement = None
143 _requirement = None
144
144
145 @staticmethod
145 @staticmethod
146 def _newreporequirements(ui):
146 def _newreporequirements(ui):
147 return localrepo.newreporequirements(
147 return localrepo.newreporequirements(
148 ui, localrepo.defaultcreateopts(ui)
148 ui, localrepo.defaultcreateopts(ui)
149 )
149 )
150
150
151 @classmethod
151 @classmethod
152 def fromrepo(cls, repo):
152 def fromrepo(cls, repo):
153 assert cls._requirement is not None
153 assert cls._requirement is not None
154 return cls._requirement in repo.requirements
154 return cls._requirement in repo.requirements
155
155
156 @classmethod
156 @classmethod
157 def fromconfig(cls, repo):
157 def fromconfig(cls, repo):
158 assert cls._requirement is not None
158 assert cls._requirement is not None
159 return cls._requirement in cls._newreporequirements(repo.ui)
159 return cls._requirement in cls._newreporequirements(repo.ui)
160
160
161
161
162 @registerformatvariant
162 @registerformatvariant
163 class fncache(requirementformatvariant):
163 class fncache(requirementformatvariant):
164 name = b'fncache'
164 name = b'fncache'
165
165
166 _requirement = requirements.FNCACHE_REQUIREMENT
166 _requirement = requirements.FNCACHE_REQUIREMENT
167
167
168 default = True
168 default = True
169
169
170 description = _(
170 description = _(
171 b'long and reserved filenames may not work correctly; '
171 b'long and reserved filenames may not work correctly; '
172 b'repository performance is sub-optimal'
172 b'repository performance is sub-optimal'
173 )
173 )
174
174
175 upgrademessage = _(
175 upgrademessage = _(
176 b'repository will be more resilient to storing '
176 b'repository will be more resilient to storing '
177 b'certain paths and performance of certain '
177 b'certain paths and performance of certain '
178 b'operations should be improved'
178 b'operations should be improved'
179 )
179 )
180
180
181
181
182 @registerformatvariant
182 @registerformatvariant
183 class dirstatev2(requirementformatvariant):
183 class dirstatev2(requirementformatvariant):
184 name = b'dirstate-v2'
184 name = b'dirstate-v2'
185 _requirement = requirements.DIRSTATE_V2_REQUIREMENT
185 _requirement = requirements.DIRSTATE_V2_REQUIREMENT
186
186
187 default = False
187 default = False
188
188
189 description = _(
189 description = _(
190 b'version 1 of the dirstate file format requires '
190 b'version 1 of the dirstate file format requires '
191 b'reading and parsing it all at once.\n'
191 b'reading and parsing it all at once.\n'
192 b'Version 2 has a better structure,'
192 b'Version 2 has a better structure,'
193 b'better information and lighter update mechanism'
193 b'better information and lighter update mechanism'
194 )
194 )
195
195
196 upgrademessage = _(b'"hg status" will be faster')
196 upgrademessage = _(b'"hg status" will be faster')
197
197
198 touches_filelogs = False
198 touches_filelogs = False
199 touches_manifests = False
199 touches_manifests = False
200 touches_changelog = False
200 touches_changelog = False
201 touches_requirements = True
201 touches_requirements = True
202 touches_dirstate = True
202 touches_dirstate = True
203 compatible_with_share = True
203 compatible_with_share = True
204
204
205
205
206 @registerformatvariant
206 @registerformatvariant
207 class dirstatetrackedkey(requirementformatvariant):
207 class dirstatetrackedkey(requirementformatvariant):
208 name = b'tracked-hint'
208 name = b'tracked-hint'
209 _requirement = requirements.DIRSTATE_TRACKED_HINT_V1
209 _requirement = requirements.DIRSTATE_TRACKED_HINT_V1
210
210
211 default = False
211 default = False
212
212
213 description = _(
213 description = _(
214 b'Add a small file to help external tooling that watch the tracked set'
214 b'Add a small file to help external tooling that watch the tracked set'
215 )
215 )
216
216
217 upgrademessage = _(
217 upgrademessage = _(
218 b'external tools will be informated of potential change in the tracked set'
218 b'external tools will be informated of potential change in the tracked set'
219 )
219 )
220
220
221 touches_filelogs = False
221 touches_filelogs = False
222 touches_manifests = False
222 touches_manifests = False
223 touches_changelog = False
223 touches_changelog = False
224 touches_requirements = True
224 touches_requirements = True
225 touches_dirstate = True
225 touches_dirstate = True
226 compatible_with_share = True
226 compatible_with_share = True
227
227
228
228
229 @registerformatvariant
229 @registerformatvariant
230 class dotencode(requirementformatvariant):
230 class dotencode(requirementformatvariant):
231 name = b'dotencode'
231 name = b'dotencode'
232
232
233 _requirement = requirements.DOTENCODE_REQUIREMENT
233 _requirement = requirements.DOTENCODE_REQUIREMENT
234
234
235 default = True
235 default = True
236
236
237 description = _(
237 description = _(
238 b'storage of filenames beginning with a period or '
238 b'storage of filenames beginning with a period or '
239 b'space may not work correctly'
239 b'space may not work correctly'
240 )
240 )
241
241
242 upgrademessage = _(
242 upgrademessage = _(
243 b'repository will be better able to store files '
243 b'repository will be better able to store files '
244 b'beginning with a space or period'
244 b'beginning with a space or period'
245 )
245 )
246
246
247
247
248 @registerformatvariant
248 @registerformatvariant
249 class generaldelta(requirementformatvariant):
249 class generaldelta(requirementformatvariant):
250 name = b'generaldelta'
250 name = b'generaldelta'
251
251
252 _requirement = requirements.GENERALDELTA_REQUIREMENT
252 _requirement = requirements.GENERALDELTA_REQUIREMENT
253
253
254 default = True
254 default = True
255
255
256 description = _(
256 description = _(
257 b'deltas within internal storage are unable to '
257 b'deltas within internal storage are unable to '
258 b'choose optimal revisions; repository is larger and '
258 b'choose optimal revisions; repository is larger and '
259 b'slower than it could be; interaction with other '
259 b'slower than it could be; interaction with other '
260 b'repositories may require extra network and CPU '
260 b'repositories may require extra network and CPU '
261 b'resources, making "hg push" and "hg pull" slower'
261 b'resources, making "hg push" and "hg pull" slower'
262 )
262 )
263
263
264 upgrademessage = _(
264 upgrademessage = _(
265 b'repository storage will be able to create '
265 b'repository storage will be able to create '
266 b'optimal deltas; new repository data will be '
266 b'optimal deltas; new repository data will be '
267 b'smaller and read times should decrease; '
267 b'smaller and read times should decrease; '
268 b'interacting with other repositories using this '
268 b'interacting with other repositories using this '
269 b'storage model should require less network and '
269 b'storage model should require less network and '
270 b'CPU resources, making "hg push" and "hg pull" '
270 b'CPU resources, making "hg push" and "hg pull" '
271 b'faster'
271 b'faster'
272 )
272 )
273
273
274
274
275 @registerformatvariant
275 @registerformatvariant
276 class sharesafe(requirementformatvariant):
276 class sharesafe(requirementformatvariant):
277 name = b'share-safe'
277 name = b'share-safe'
278 _requirement = requirements.SHARESAFE_REQUIREMENT
278 _requirement = requirements.SHARESAFE_REQUIREMENT
279
279
280 default = True
280 default = True
281
281
282 description = _(
282 description = _(
283 b'old shared repositories do not share source repository '
283 b'old shared repositories do not share source repository '
284 b'requirements and config. This leads to various problems '
284 b'requirements and config. This leads to various problems '
285 b'when the source repository format is upgraded or some new '
285 b'when the source repository format is upgraded or some new '
286 b'extensions are enabled.'
286 b'extensions are enabled.'
287 )
287 )
288
288
289 upgrademessage = _(
289 upgrademessage = _(
290 b'Upgrades a repository to share-safe format so that future '
290 b'Upgrades a repository to share-safe format so that future '
291 b'shares of this repository share its requirements and configs.'
291 b'shares of this repository share its requirements and configs.'
292 )
292 )
293
293
294 postdowngrademessage = _(
294 postdowngrademessage = _(
295 b'repository downgraded to not use share safe mode, '
295 b'repository downgraded to not use share safe mode, '
296 b'existing shares will not work and need to be reshared.'
296 b'existing shares will not work and need to be reshared.'
297 )
297 )
298
298
299 postupgrademessage = _(
299 postupgrademessage = _(
300 b'repository upgraded to share safe mode, existing'
300 b'repository upgraded to share safe mode, existing'
301 b' shares will still work in old non-safe mode. '
301 b' shares will still work in old non-safe mode. '
302 b'Re-share existing shares to use them in safe mode'
302 b'Re-share existing shares to use them in safe mode'
303 b' New shares will be created in safe mode.'
303 b' New shares will be created in safe mode.'
304 )
304 )
305
305
306 # upgrade only needs to change the requirements
306 # upgrade only needs to change the requirements
307 touches_filelogs = False
307 touches_filelogs = False
308 touches_manifests = False
308 touches_manifests = False
309 touches_changelog = False
309 touches_changelog = False
310 touches_requirements = True
310 touches_requirements = True
311
311
312
312
313 @registerformatvariant
313 @registerformatvariant
314 class sparserevlog(requirementformatvariant):
314 class sparserevlog(requirementformatvariant):
315 name = b'sparserevlog'
315 name = b'sparserevlog'
316
316
317 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
317 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
318
318
319 default = True
319 default = True
320
320
321 description = _(
321 description = _(
322 b'in order to limit disk reading and memory usage on older '
322 b'in order to limit disk reading and memory usage on older '
323 b'version, the span of a delta chain from its root to its '
323 b'version, the span of a delta chain from its root to its '
324 b'end is limited, whatever the relevant data in this span. '
324 b'end is limited, whatever the relevant data in this span. '
325 b'This can severly limit Mercurial ability to build good '
325 b'This can severly limit Mercurial ability to build good '
326 b'chain of delta resulting is much more storage space being '
326 b'chain of delta resulting is much more storage space being '
327 b'taken and limit reusability of on disk delta during '
327 b'taken and limit reusability of on disk delta during '
328 b'exchange.'
328 b'exchange.'
329 )
329 )
330
330
331 upgrademessage = _(
331 upgrademessage = _(
332 b'Revlog supports delta chain with more unused data '
332 b'Revlog supports delta chain with more unused data '
333 b'between payload. These gaps will be skipped at read '
333 b'between payload. These gaps will be skipped at read '
334 b'time. This allows for better delta chains, making a '
334 b'time. This allows for better delta chains, making a '
335 b'better compression and faster exchange with server.'
335 b'better compression and faster exchange with server.'
336 )
336 )
337
337
338
338
339 @registerformatvariant
339 @registerformatvariant
340 class persistentnodemap(requirementformatvariant):
340 class persistentnodemap(requirementformatvariant):
341 name = b'persistent-nodemap'
341 name = b'persistent-nodemap'
342
342
343 _requirement = requirements.NODEMAP_REQUIREMENT
343 _requirement = requirements.NODEMAP_REQUIREMENT
344
344
345 default = False
345 default = False
346
346
347 description = _(
347 description = _(
348 b'persist the node -> rev mapping on disk to speedup lookup'
348 b'persist the node -> rev mapping on disk to speedup lookup'
349 )
349 )
350
350
351 upgrademessage = _(b'Speedup revision lookup by node id.')
351 upgrademessage = _(b'Speedup revision lookup by node id.')
352
352
353
353
354 @registerformatvariant
354 @registerformatvariant
355 class copiessdc(requirementformatvariant):
355 class copiessdc(requirementformatvariant):
356 name = b'copies-sdc'
356 name = b'copies-sdc'
357
357
358 _requirement = requirements.COPIESSDC_REQUIREMENT
358 _requirement = requirements.COPIESSDC_REQUIREMENT
359
359
360 default = False
360 default = False
361
361
362 description = _(b'Stores copies information alongside changesets.')
362 description = _(b'Stores copies information alongside changesets.')
363
363
364 upgrademessage = _(
364 upgrademessage = _(
365 b'Allows to use more efficient algorithm to deal with copy tracing.'
365 b'Allows to use more efficient algorithm to deal with copy tracing.'
366 )
366 )
367
367
368 touches_filelogs = False
368 touches_filelogs = False
369 touches_manifests = False
369 touches_manifests = False
370
370
371
371
372 @registerformatvariant
372 @registerformatvariant
373 class revlogv2(requirementformatvariant):
373 class revlogv2(requirementformatvariant):
374 name = b'revlog-v2'
374 name = b'revlog-v2'
375 _requirement = requirements.REVLOGV2_REQUIREMENT
375 _requirement = requirements.REVLOGV2_REQUIREMENT
376 default = False
376 default = False
377 description = _(b'Version 2 of the revlog.')
377 description = _(b'Version 2 of the revlog.')
378 upgrademessage = _(b'very experimental')
378 upgrademessage = _(b'very experimental')
379
379
380
380
381 @registerformatvariant
381 @registerformatvariant
382 class changelogv2(requirementformatvariant):
382 class changelogv2(requirementformatvariant):
383 name = b'changelog-v2'
383 name = b'changelog-v2'
384 _requirement = requirements.CHANGELOGV2_REQUIREMENT
384 _requirement = requirements.CHANGELOGV2_REQUIREMENT
385 default = False
385 default = False
386 description = _(b'An iteration of the revlog focussed on changelog needs.')
386 description = _(b'An iteration of the revlog focussed on changelog needs.')
387 upgrademessage = _(b'quite experimental')
387 upgrademessage = _(b'quite experimental')
388
388
389 touches_filelogs = False
389 touches_filelogs = False
390 touches_manifests = False
390 touches_manifests = False
391
391
392
392
393 @registerformatvariant
393 @registerformatvariant
394 class removecldeltachain(formatvariant):
394 class removecldeltachain(formatvariant):
395 name = b'plain-cl-delta'
395 name = b'plain-cl-delta'
396
396
397 default = True
397 default = True
398
398
399 description = _(
399 description = _(
400 b'changelog storage is using deltas instead of '
400 b'changelog storage is using deltas instead of '
401 b'raw entries; changelog reading and any '
401 b'raw entries; changelog reading and any '
402 b'operation relying on changelog data are slower '
402 b'operation relying on changelog data are slower '
403 b'than they could be'
403 b'than they could be'
404 )
404 )
405
405
406 upgrademessage = _(
406 upgrademessage = _(
407 b'changelog storage will be reformated to '
407 b'changelog storage will be reformated to '
408 b'store raw entries; changelog reading will be '
408 b'store raw entries; changelog reading will be '
409 b'faster; changelog size may be reduced'
409 b'faster; changelog size may be reduced'
410 )
410 )
411
411
412 @staticmethod
412 @staticmethod
413 def fromrepo(repo):
413 def fromrepo(repo):
414 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
414 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
415 # changelogs with deltas.
415 # changelogs with deltas.
416 cl = repo.unfiltered().changelog
416 cl = repo.unfiltered().changelog
417 if len(cl) <= 1000:
417 if len(cl) <= 1000:
418 some_rev = list(cl)
418 some_rev = list(cl)
419 else:
419 else:
420 # do a random sampling to speeds things up Scanning the whole
420 # do a random sampling to speeds things up Scanning the whole
421 # repository can get really slow on bigger repo.
421 # repository can get really slow on bigger repo.
422 some_rev = sorted(
422 some_rev = sorted(
423 {random.randint(0, len(cl) - 1) for x in range(1000)}
423 {random.randint(0, len(cl) - 1) for x in range(1000)}
424 )
424 )
425 chainbase = cl.chainbase
425 chainbase = cl.chainbase
426 return all(rev == chainbase(rev) for rev in some_rev)
426 return all(rev == chainbase(rev) for rev in some_rev)
427
427
428 @staticmethod
428 @staticmethod
429 def fromconfig(repo):
429 def fromconfig(repo):
430 return True
430 return True
431
431
432
432
433 _has_zstd = (
433 _has_zstd = (
434 b'zstd' in util.compengines
434 b'zstd' in util.compengines
435 and util.compengines[b'zstd'].available()
435 and util.compengines[b'zstd'].available()
436 and util.compengines[b'zstd'].revlogheader()
436 and util.compengines[b'zstd'].revlogheader()
437 )
437 )
438
438
439
439
440 @registerformatvariant
440 @registerformatvariant
441 class compressionengine(formatvariant):
441 class compressionengine(formatvariant):
442 name = b'compression'
442 name = b'compression'
443
443
444 if _has_zstd:
444 if _has_zstd:
445 default = b'zstd'
445 default = b'zstd'
446 else:
446 else:
447 default = b'zlib'
447 default = b'zlib'
448
448
449 description = _(
449 description = _(
450 b'Compresion algorithm used to compress data. '
450 b'Compresion algorithm used to compress data. '
451 b'Some engine are faster than other'
451 b'Some engine are faster than other'
452 )
452 )
453
453
454 upgrademessage = _(
454 upgrademessage = _(
455 b'revlog content will be recompressed with the new algorithm.'
455 b'revlog content will be recompressed with the new algorithm.'
456 )
456 )
457
457
458 @classmethod
458 @classmethod
459 def fromrepo(cls, repo):
459 def fromrepo(cls, repo):
460 # we allow multiple compression engine requirement to co-exist because
460 # we allow multiple compression engine requirement to co-exist because
461 # strickly speaking, revlog seems to support mixed compression style.
461 # strickly speaking, revlog seems to support mixed compression style.
462 #
462 #
463 # The compression used for new entries will be "the last one"
463 # The compression used for new entries will be "the last one"
464 compression = b'zlib'
464 compression = b'zlib'
465 for req in repo.requirements:
465 for req in repo.requirements:
466 prefix = req.startswith
466 prefix = req.startswith
467 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
467 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
468 compression = req.split(b'-', 2)[2]
468 compression = req.split(b'-', 2)[2]
469 return compression
469 return compression
470
470
471 @classmethod
471 @classmethod
472 def fromconfig(cls, repo):
472 def fromconfig(cls, repo):
473 compengines = repo.ui.configlist(b'format', b'revlog-compression')
473 compengines = repo.ui.configlist(b'format', b'revlog-compression')
474 # return the first valid value as the selection code would do
474 # return the first valid value as the selection code would do
475 for comp in compengines:
475 for comp in compengines:
476 if comp in util.compengines:
476 if comp in util.compengines:
477 e = util.compengines[comp]
477 e = util.compengines[comp]
478 if e.available() and e.revlogheader():
478 if e.available() and e.revlogheader():
479 return comp
479 return comp
480
480
481 # no valide compression found lets display it all for clarity
481 # no valide compression found lets display it all for clarity
482 return b','.join(compengines)
482 return b','.join(compengines)
483
483
484
484
485 @registerformatvariant
485 @registerformatvariant
486 class compressionlevel(formatvariant):
486 class compressionlevel(formatvariant):
487 name = b'compression-level'
487 name = b'compression-level'
488 default = b'default'
488 default = b'default'
489
489
490 description = _(b'compression level')
490 description = _(b'compression level')
491
491
492 upgrademessage = _(b'revlog content will be recompressed')
492 upgrademessage = _(b'revlog content will be recompressed')
493
493
494 @classmethod
494 @classmethod
495 def fromrepo(cls, repo):
495 def fromrepo(cls, repo):
496 comp = compressionengine.fromrepo(repo)
496 comp = compressionengine.fromrepo(repo)
497 level = None
497 level = None
498 if comp == b'zlib':
498 if comp == b'zlib':
499 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
499 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
500 elif comp == b'zstd':
500 elif comp == b'zstd':
501 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
501 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
502 if level is None:
502 if level is None:
503 return b'default'
503 return b'default'
504 return bytes(level)
504 return bytes(level)
505
505
506 @classmethod
506 @classmethod
507 def fromconfig(cls, repo):
507 def fromconfig(cls, repo):
508 comp = compressionengine.fromconfig(repo)
508 comp = compressionengine.fromconfig(repo)
509 level = None
509 level = None
510 if comp == b'zlib':
510 if comp == b'zlib':
511 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
511 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
512 elif comp == b'zstd':
512 elif comp == b'zstd':
513 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
513 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
514 if level is None:
514 if level is None:
515 return b'default'
515 return b'default'
516 return bytes(level)
516 return bytes(level)
517
517
518
518
519 def find_format_upgrades(repo):
519 def find_format_upgrades(repo):
520 """returns a list of format upgrades which can be perform on the repo"""
520 """returns a list of format upgrades which can be perform on the repo"""
521 upgrades = []
521 upgrades = []
522
522
523 # We could detect lack of revlogv1 and store here, but they were added
523 # We could detect lack of revlogv1 and store here, but they were added
524 # in 0.9.2 and we don't support upgrading repos without these
524 # in 0.9.2 and we don't support upgrading repos without these
525 # requirements, so let's not bother.
525 # requirements, so let's not bother.
526
526
527 for fv in allformatvariant:
527 for fv in allformatvariant:
528 if not fv.fromrepo(repo):
528 if not fv.fromrepo(repo):
529 upgrades.append(fv)
529 upgrades.append(fv)
530
530
531 return upgrades
531 return upgrades
532
532
533
533
534 def find_format_downgrades(repo):
534 def find_format_downgrades(repo):
535 """returns a list of format downgrades which will be performed on the repo
535 """returns a list of format downgrades which will be performed on the repo
536 because of disabled config option for them"""
536 because of disabled config option for them"""
537
537
538 downgrades = []
538 downgrades = []
539
539
540 for fv in allformatvariant:
540 for fv in allformatvariant:
541 if fv.name == b'compression':
541 if fv.name == b'compression':
542 # If there is a compression change between repository
542 # If there is a compression change between repository
543 # and config, destination repository compression will change
543 # and config, destination repository compression will change
544 # and current compression will be removed.
544 # and current compression will be removed.
545 if fv.fromrepo(repo) != fv.fromconfig(repo):
545 if fv.fromrepo(repo) != fv.fromconfig(repo):
546 downgrades.append(fv)
546 downgrades.append(fv)
547 continue
547 continue
548 # format variant exist in repo but does not exist in new repository
548 # format variant exist in repo but does not exist in new repository
549 # config
549 # config
550 if fv.fromrepo(repo) and not fv.fromconfig(repo):
550 if fv.fromrepo(repo) and not fv.fromconfig(repo):
551 downgrades.append(fv)
551 downgrades.append(fv)
552
552
553 return downgrades
553 return downgrades
554
554
555
555
556 ALL_OPTIMISATIONS = []
556 ALL_OPTIMISATIONS = []
557
557
558
558
559 def register_optimization(obj):
559 def register_optimization(obj):
560 ALL_OPTIMISATIONS.append(obj)
560 ALL_OPTIMISATIONS.append(obj)
561 return obj
561 return obj
562
562
563
563
564 class optimization(improvement):
564 class optimization(improvement):
565 """an improvement subclass dedicated to optimizations"""
565 """an improvement subclass dedicated to optimizations"""
566
566
567 type = OPTIMISATION
567 type = OPTIMISATION
568
568
569
569
570 @register_optimization
570 @register_optimization
571 class redeltaparents(optimization):
571 class redeltaparents(optimization):
572 name = b're-delta-parent'
572 name = b're-delta-parent'
573
573
574 type = OPTIMISATION
574 type = OPTIMISATION
575
575
576 description = _(
576 description = _(
577 b'deltas within internal storage will be recalculated to '
577 b'deltas within internal storage will be recalculated to '
578 b'choose an optimal base revision where this was not '
578 b'choose an optimal base revision where this was not '
579 b'already done; the size of the repository may shrink and '
579 b'already done; the size of the repository may shrink and '
580 b'various operations may become faster; the first time '
580 b'various operations may become faster; the first time '
581 b'this optimization is performed could slow down upgrade '
581 b'this optimization is performed could slow down upgrade '
582 b'execution considerably; subsequent invocations should '
582 b'execution considerably; subsequent invocations should '
583 b'not run noticeably slower'
583 b'not run noticeably slower'
584 )
584 )
585
585
586 upgrademessage = _(
586 upgrademessage = _(
587 b'deltas within internal storage will choose a new '
587 b'deltas within internal storage will choose a new '
588 b'base revision if needed'
588 b'base revision if needed'
589 )
589 )
590
590
591
591
592 @register_optimization
592 @register_optimization
593 class redeltamultibase(optimization):
593 class redeltamultibase(optimization):
594 name = b're-delta-multibase'
594 name = b're-delta-multibase'
595
595
596 type = OPTIMISATION
596 type = OPTIMISATION
597
597
598 description = _(
598 description = _(
599 b'deltas within internal storage will be recalculated '
599 b'deltas within internal storage will be recalculated '
600 b'against multiple base revision and the smallest '
600 b'against multiple base revision and the smallest '
601 b'difference will be used; the size of the repository may '
601 b'difference will be used; the size of the repository may '
602 b'shrink significantly when there are many merges; this '
602 b'shrink significantly when there are many merges; this '
603 b'optimization will slow down execution in proportion to '
603 b'optimization will slow down execution in proportion to '
604 b'the number of merges in the repository and the amount '
604 b'the number of merges in the repository and the amount '
605 b'of files in the repository; this slow down should not '
605 b'of files in the repository; this slow down should not '
606 b'be significant unless there are tens of thousands of '
606 b'be significant unless there are tens of thousands of '
607 b'files and thousands of merges'
607 b'files and thousands of merges'
608 )
608 )
609
609
610 upgrademessage = _(
610 upgrademessage = _(
611 b'deltas within internal storage will choose an '
611 b'deltas within internal storage will choose an '
612 b'optimal delta by computing deltas against multiple '
612 b'optimal delta by computing deltas against multiple '
613 b'parents; may slow down execution time '
613 b'parents; may slow down execution time '
614 b'significantly'
614 b'significantly'
615 )
615 )
616
616
617
617
618 @register_optimization
618 @register_optimization
619 class redeltaall(optimization):
619 class redeltaall(optimization):
620 name = b're-delta-all'
620 name = b're-delta-all'
621
621
622 type = OPTIMISATION
622 type = OPTIMISATION
623
623
624 description = _(
624 description = _(
625 b'deltas within internal storage will always be '
625 b'deltas within internal storage will always be '
626 b'recalculated without reusing prior deltas; this will '
626 b'recalculated without reusing prior deltas; this will '
627 b'likely make execution run several times slower; this '
627 b'likely make execution run several times slower; this '
628 b'optimization is typically not needed'
628 b'optimization is typically not needed'
629 )
629 )
630
630
631 upgrademessage = _(
631 upgrademessage = _(
632 b'deltas within internal storage will be fully '
632 b'deltas within internal storage will be fully '
633 b'recomputed; this will likely drastically slow down '
633 b'recomputed; this will likely drastically slow down '
634 b'execution time'
634 b'execution time'
635 )
635 )
636
636
637
637
638 @register_optimization
638 @register_optimization
639 class redeltafulladd(optimization):
639 class redeltafulladd(optimization):
640 name = b're-delta-fulladd'
640 name = b're-delta-fulladd'
641
641
642 type = OPTIMISATION
642 type = OPTIMISATION
643
643
644 description = _(
644 description = _(
645 b'every revision will be re-added as if it was new '
645 b'every revision will be re-added as if it was new '
646 b'content. It will go through the full storage '
646 b'content. It will go through the full storage '
647 b'mechanism giving extensions a chance to process it '
647 b'mechanism giving extensions a chance to process it '
648 b'(eg. lfs). This is similar to "re-delta-all" but even '
648 b'(eg. lfs). This is similar to "re-delta-all" but even '
649 b'slower since more logic is involved.'
649 b'slower since more logic is involved.'
650 )
650 )
651
651
652 upgrademessage = _(
652 upgrademessage = _(
653 b'each revision will be added as new content to the '
653 b'each revision will be added as new content to the '
654 b'internal storage; this will likely drastically slow '
654 b'internal storage; this will likely drastically slow '
655 b'down execution time, but some extensions might need '
655 b'down execution time, but some extensions might need '
656 b'it'
656 b'it'
657 )
657 )
658
658
659
659
660 def findoptimizations(repo):
660 def findoptimizations(repo):
661 """Determine optimisation that could be used during upgrade"""
661 """Determine optimisation that could be used during upgrade"""
662 # These are unconditionally added. There is logic later that figures out
662 # These are unconditionally added. There is logic later that figures out
663 # which ones to apply.
663 # which ones to apply.
664 return list(ALL_OPTIMISATIONS)
664 return list(ALL_OPTIMISATIONS)
665
665
666
666
667 def determine_upgrade_actions(
667 def determine_upgrade_actions(
668 repo, format_upgrades, optimizations, sourcereqs, destreqs
668 repo, format_upgrades, optimizations, sourcereqs, destreqs
669 ):
669 ):
670 """Determine upgrade actions that will be performed.
670 """Determine upgrade actions that will be performed.
671
671
672 Given a list of improvements as returned by ``find_format_upgrades`` and
672 Given a list of improvements as returned by ``find_format_upgrades`` and
673 ``findoptimizations``, determine the list of upgrade actions that
673 ``findoptimizations``, determine the list of upgrade actions that
674 will be performed.
674 will be performed.
675
675
676 The role of this function is to filter improvements if needed, apply
676 The role of this function is to filter improvements if needed, apply
677 recommended optimizations from the improvements list that make sense,
677 recommended optimizations from the improvements list that make sense,
678 etc.
678 etc.
679
679
680 Returns a list of action names.
680 Returns a list of action names.
681 """
681 """
682 newactions = []
682 newactions = []
683
683
684 for d in format_upgrades:
684 for d in format_upgrades:
685 if hasattr(d, '_requirement'):
685 if hasattr(d, '_requirement'):
686 name = d._requirement
686 name = d._requirement
687 else:
687 else:
688 name = None
688 name = None
689
689
690 # If the action is a requirement that doesn't show up in the
690 # If the action is a requirement that doesn't show up in the
691 # destination requirements, prune the action.
691 # destination requirements, prune the action.
692 if name is not None and name not in destreqs:
692 if name is not None and name not in destreqs:
693 continue
693 continue
694
694
695 newactions.append(d)
695 newactions.append(d)
696
696
697 newactions.extend(
697 newactions.extend(
698 o
698 o
699 for o in sorted(optimizations, key=(lambda x: x.name))
699 for o in sorted(optimizations, key=(lambda x: x.name))
700 if o not in newactions
700 if o not in newactions
701 )
701 )
702
702
703 # FUTURE consider adding some optimizations here for certain transitions.
703 # FUTURE consider adding some optimizations here for certain transitions.
704 # e.g. adding generaldelta could schedule parent redeltas.
704 # e.g. adding generaldelta could schedule parent redeltas.
705
705
706 return newactions
706 return newactions
707
707
708
708
709 class BaseOperation:
709 class BaseOperation:
710 """base class that contains the minimum for an upgrade to work
710 """base class that contains the minimum for an upgrade to work
711
711
712 (this might need to be extended as the usage for subclass alternative to
712 (this might need to be extended as the usage for subclass alternative to
713 UpgradeOperation extends)
713 UpgradeOperation extends)
714 """
714 """
715
715
716 def __init__(
716 def __init__(
717 self,
717 self,
718 new_requirements,
718 new_requirements,
719 backup_store,
719 backup_store,
720 ):
720 ):
721 self.new_requirements = new_requirements
721 self.new_requirements = new_requirements
722 # should this operation create a backup of the store
722 # should this operation create a backup of the store
723 self.backup_store = backup_store
723 self.backup_store = backup_store
724
724
725
725
726 class UpgradeOperation(BaseOperation):
726 class UpgradeOperation(BaseOperation):
727 """represent the work to be done during an upgrade"""
727 """represent the work to be done during an upgrade"""
728
728
729 def __init__(
729 def __init__(
730 self,
730 self,
731 ui,
731 ui,
732 new_requirements,
732 new_requirements,
733 current_requirements,
733 current_requirements,
734 upgrade_actions,
734 upgrade_actions,
735 removed_actions,
735 removed_actions,
736 revlogs_to_process,
736 revlogs_to_process,
737 backup_store,
737 backup_store,
738 ):
738 ):
739 super().__init__(
739 super().__init__(
740 new_requirements,
740 new_requirements,
741 backup_store,
741 backup_store,
742 )
742 )
743 self.ui = ui
743 self.ui = ui
744 self.current_requirements = current_requirements
744 self.current_requirements = current_requirements
745 # list of upgrade actions the operation will perform
745 # list of upgrade actions the operation will perform
746 self.upgrade_actions = upgrade_actions
746 self.upgrade_actions = upgrade_actions
747 self.removed_actions = removed_actions
747 self.removed_actions = removed_actions
748 self.revlogs_to_process = revlogs_to_process
748 self.revlogs_to_process = revlogs_to_process
749 # requirements which will be added by the operation
749 # requirements which will be added by the operation
750 self._added_requirements = (
750 self._added_requirements = (
751 self.new_requirements - self.current_requirements
751 self.new_requirements - self.current_requirements
752 )
752 )
753 # requirements which will be removed by the operation
753 # requirements which will be removed by the operation
754 self._removed_requirements = (
754 self._removed_requirements = (
755 self.current_requirements - self.new_requirements
755 self.current_requirements - self.new_requirements
756 )
756 )
757 # requirements which will be preserved by the operation
757 # requirements which will be preserved by the operation
758 self._preserved_requirements = (
758 self._preserved_requirements = (
759 self.current_requirements & self.new_requirements
759 self.current_requirements & self.new_requirements
760 )
760 )
761 # optimizations which are not used and it's recommended that they
761 # optimizations which are not used and it's recommended that they
762 # should use them
762 # should use them
763 all_optimizations = findoptimizations(None)
763 all_optimizations = findoptimizations(None)
764 self.unused_optimizations = [
764 self.unused_optimizations = [
765 i for i in all_optimizations if i not in self.upgrade_actions
765 i for i in all_optimizations if i not in self.upgrade_actions
766 ]
766 ]
767
767
768 # delta reuse mode of this upgrade operation
768 # delta reuse mode of this upgrade operation
769 upgrade_actions_names = self.upgrade_actions_names
769 upgrade_actions_names = self.upgrade_actions_names
770 self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
770 self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
771 if b're-delta-all' in upgrade_actions_names:
771 if b're-delta-all' in upgrade_actions_names:
772 self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
772 self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
773 elif b're-delta-parent' in upgrade_actions_names:
773 elif b're-delta-parent' in upgrade_actions_names:
774 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
774 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
775 elif b're-delta-multibase' in upgrade_actions_names:
775 elif b're-delta-multibase' in upgrade_actions_names:
776 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
776 self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
777 elif b're-delta-fulladd' in upgrade_actions_names:
777 elif b're-delta-fulladd' in upgrade_actions_names:
778 self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
778 self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
779
779
780 # should this operation force re-delta of both parents
780 # should this operation force re-delta of both parents
781 self.force_re_delta_both_parents = (
781 self.force_re_delta_both_parents = (
782 b're-delta-multibase' in upgrade_actions_names
782 b're-delta-multibase' in upgrade_actions_names
783 )
783 )
784
784
785 @property
785 @property
786 def upgrade_actions_names(self):
786 def upgrade_actions_names(self):
787 return set([a.name for a in self.upgrade_actions])
787 return set([a.name for a in self.upgrade_actions])
788
788
789 @property
789 @property
790 def requirements_only(self):
790 def requirements_only(self):
791 # does the operation only touches repository requirement
791 # does the operation only touches repository requirement
792 return (
792 return (
793 self.touches_requirements
793 self.touches_requirements
794 and not self.touches_filelogs
794 and not self.touches_filelogs
795 and not self.touches_manifests
795 and not self.touches_manifests
796 and not self.touches_changelog
796 and not self.touches_changelog
797 and not self.touches_dirstate
797 and not self.touches_dirstate
798 )
798 )
799
799
800 @property
800 @property
801 def touches_filelogs(self):
801 def touches_filelogs(self):
802 for a in self.upgrade_actions:
802 for a in self.upgrade_actions:
803 # in optimisations, we re-process the revlogs again
803 # in optimisations, we re-process the revlogs again
804 if a.type == OPTIMISATION:
804 if a.type == OPTIMISATION:
805 return True
805 return True
806 elif a.touches_filelogs:
806 elif a.touches_filelogs:
807 return True
807 return True
808 for a in self.removed_actions:
808 for a in self.removed_actions:
809 if a.touches_filelogs:
809 if a.touches_filelogs:
810 return True
810 return True
811 return False
811 return False
812
812
813 @property
813 @property
814 def touches_manifests(self):
814 def touches_manifests(self):
815 for a in self.upgrade_actions:
815 for a in self.upgrade_actions:
816 # in optimisations, we re-process the revlogs again
816 # in optimisations, we re-process the revlogs again
817 if a.type == OPTIMISATION:
817 if a.type == OPTIMISATION:
818 return True
818 return True
819 elif a.touches_manifests:
819 elif a.touches_manifests:
820 return True
820 return True
821 for a in self.removed_actions:
821 for a in self.removed_actions:
822 if a.touches_manifests:
822 if a.touches_manifests:
823 return True
823 return True
824 return False
824 return False
825
825
826 @property
826 @property
827 def touches_changelog(self):
827 def touches_changelog(self):
828 for a in self.upgrade_actions:
828 for a in self.upgrade_actions:
829 # in optimisations, we re-process the revlogs again
829 # in optimisations, we re-process the revlogs again
830 if a.type == OPTIMISATION:
830 if a.type == OPTIMISATION:
831 return True
831 return True
832 elif a.touches_changelog:
832 elif a.touches_changelog:
833 return True
833 return True
834 for a in self.removed_actions:
834 for a in self.removed_actions:
835 if a.touches_changelog:
835 if a.touches_changelog:
836 return True
836 return True
837 return False
837 return False
838
838
839 @property
839 @property
840 def touches_requirements(self):
840 def touches_requirements(self):
841 for a in self.upgrade_actions:
841 for a in self.upgrade_actions:
842 # optimisations are used to re-process revlogs and does not result
842 # optimisations are used to re-process revlogs and does not result
843 # in a requirement being added or removed
843 # in a requirement being added or removed
844 if a.type == OPTIMISATION:
844 if a.type == OPTIMISATION:
845 pass
845 pass
846 elif a.touches_requirements:
846 elif a.touches_requirements:
847 return True
847 return True
848 for a in self.removed_actions:
848 for a in self.removed_actions:
849 if a.touches_requirements:
849 if a.touches_requirements:
850 return True
850 return True
851
851
852 @property
852 @property
853 def touches_dirstate(self):
853 def touches_dirstate(self):
854 for a in self.upgrade_actions:
854 for a in self.upgrade_actions:
855 # revlog optimisations do not affect the dirstate
855 # revlog optimisations do not affect the dirstate
856 if a.type == OPTIMISATION:
856 if a.type == OPTIMISATION:
857 pass
857 pass
858 elif a.touches_dirstate:
858 elif a.touches_dirstate:
859 return True
859 return True
860 for a in self.removed_actions:
860 for a in self.removed_actions:
861 if a.touches_dirstate:
861 if a.touches_dirstate:
862 return True
862 return True
863
863
864 return False
864 return False
865
865
866 def _write_labeled(self, l, label: bytes):
866 def _write_labeled(self, l, label: bytes):
867 """
867 """
868 Utility function to aid writing of a list under one label
868 Utility function to aid writing of a list under one label
869 """
869 """
870 first = True
870 first = True
871 for r in sorted(l):
871 for r in sorted(l):
872 if not first:
872 if not first:
873 self.ui.write(b', ')
873 self.ui.write(b', ')
874 self.ui.write(r, label=label)
874 self.ui.write(r, label=label)
875 first = False
875 first = False
876
876
    def print_requirements(self):
        """Write a preserved/removed/added requirement summary to the ui."""
        self.ui.write(_(b'requirements\n'))
        # the preserved section is always printed, even when empty
        self.ui.write(_(b' preserved: '))
        self._write_labeled(
            self._preserved_requirements, b"upgrade-repo.requirement.preserved"
        )
        self.ui.write((b'\n'))
        # removed/added sections only appear when non-empty
        if self._removed_requirements:
            self.ui.write(_(b' removed: '))
            self._write_labeled(
                self._removed_requirements, b"upgrade-repo.requirement.removed"
            )
            self.ui.write((b'\n'))
        if self._added_requirements:
            self.ui.write(_(b' added: '))
            self._write_labeled(
                self._added_requirements, b"upgrade-repo.requirement.added"
            )
            self.ui.write((b'\n'))
        self.ui.write(b'\n')
897
897
898 def print_optimisations(self):
898 def print_optimisations(self):
899 optimisations = [
899 optimisations = [
900 a for a in self.upgrade_actions if a.type == OPTIMISATION
900 a for a in self.upgrade_actions if a.type == OPTIMISATION
901 ]
901 ]
902 optimisations.sort(key=lambda a: a.name)
902 optimisations.sort(key=lambda a: a.name)
903 if optimisations:
903 if optimisations:
904 self.ui.write(_(b'optimisations: '))
904 self.ui.write(_(b'optimisations: '))
905 self._write_labeled(
905 self._write_labeled(
906 [a.name for a in optimisations],
906 [a.name for a in optimisations],
907 b"upgrade-repo.optimisation.performed",
907 b"upgrade-repo.optimisation.performed",
908 )
908 )
909 self.ui.write(b'\n\n')
909 self.ui.write(b'\n\n')
910
910
911 def print_upgrade_actions(self):
911 def print_upgrade_actions(self):
912 for a in self.upgrade_actions:
912 for a in self.upgrade_actions:
913 self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
913 self.ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
914
914
915 def print_affected_revlogs(self):
915 def print_affected_revlogs(self):
916 if not self.revlogs_to_process:
916 if not self.revlogs_to_process:
917 self.ui.write((b'no revlogs to process\n'))
917 self.ui.write((b'no revlogs to process\n'))
918 else:
918 else:
919 self.ui.write((b'processed revlogs:\n'))
919 self.ui.write((b'processed revlogs:\n'))
920 for r in sorted(self.revlogs_to_process):
920 for r in sorted(self.revlogs_to_process):
921 self.ui.write((b' - %s\n' % r))
921 self.ui.write((b' - %s\n' % r))
922 self.ui.write((b'\n'))
922 self.ui.write((b'\n'))
923
923
924 def print_unused_optimizations(self):
924 def print_unused_optimizations(self):
925 for i in self.unused_optimizations:
925 for i in self.unused_optimizations:
926 self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
926 self.ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
927
927
928 def has_upgrade_action(self, name):
928 def has_upgrade_action(self, name):
929 """Check whether the upgrade operation will perform this action"""
929 """Check whether the upgrade operation will perform this action"""
930 return name in self._upgrade_actions_names
930 return name in self._upgrade_actions_names
931
931
932 def print_post_op_messages(self):
932 def print_post_op_messages(self):
933 """print post upgrade operation warning messages"""
933 """print post upgrade operation warning messages"""
934 for a in self.upgrade_actions:
934 for a in self.upgrade_actions:
935 if a.postupgrademessage is not None:
935 if a.postupgrademessage is not None:
936 self.ui.warn(b'%s\n' % a.postupgrademessage)
936 self.ui.warn(b'%s\n' % a.postupgrademessage)
937 for a in self.removed_actions:
937 for a in self.removed_actions:
938 if a.postdowngrademessage is not None:
938 if a.postdowngrademessage is not None:
939 self.ui.warn(b'%s\n' % a.postdowngrademessage)
939 self.ui.warn(b'%s\n' % a.postdowngrademessage)
940
940
941
941
### Code checking if a repository can go through the upgrade process at all. #
943
943
944
944
def requiredsourcerequirements(repo):
    """Obtain requirements required to be present to upgrade a repo.

    An upgrade will not be allowed if the repository doesn't have the
    requirements returned by this function.
    """
    # 'store' was introduced in Mercurial 0.9.2 and is the baseline
    # layout every upgradable repository must already use.
    return {requirements.STORE_REQUIREMENT}
955
955
956
956
def blocksourcerequirements(repo):
    """Obtain requirements that will prevent an upgrade from occurring.

    An upgrade cannot be performed if the source repository contains a
    requirement in the returned set.
    """
    # 'parentdelta' was a precursor to generaldelta and was never enabled
    # by default; it should (hopefully) not exist in the wild.
    return {b'parentdelta'}
968
968
969
969
def check_revlog_version(reqs):
    """Check that the requirements contain at least one revlog version.

    Raises ``error.Abort`` when neither revlog v1 nor v2 is present.
    """
    known_versions = (
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
    )
    if not any(version in reqs for version in known_versions):
        msg = _(b'cannot upgrade repository; missing a revlog version')
        raise error.Abort(msg)
979
979
980
980
def check_source_requirements(repo):
    """Ensure that no existing requirements prevent the repository upgrade.

    Raises ``error.Abort`` on the first violation found.
    """
    check_revlog_version(repo.requirements)

    # every requirement from requiredsourcerequirements() must be present
    missing = requiredsourcerequirements(repo) - repo.requirements
    if missing:
        msg = _(b'cannot upgrade repository; requirement missing: %s')
        raise error.Abort(msg % b', '.join(sorted(missing)))

    # none of the blocksourcerequirements() may be present
    blocking = blocksourcerequirements(repo) & repo.requirements
    if blocking:
        m = _(b'cannot upgrade repository; unsupported source requirement: %s')
        raise error.Abort(m % b', '.join(sorted(blocking)))

    # Upgrade should operate on the actual store, not the shared link.
    bad_share = (
        requirements.SHARED_REQUIREMENT in repo.requirements
        and requirements.SHARESAFE_REQUIREMENT not in repo.requirements
    )
    if bad_share:
        m = _(b'cannot upgrade repository; share repository without share-safe')
        h = _(b'check :hg:`help config.format.use-share-safe`')
        raise error.Abort(m, hint=h)
1008
1008
1009
1009
1010 ### Verify the validity of the planned requirement changes ####################
1010 ### Verify the validity of the planned requirement changes ####################
1011
1011
1012
1012
def supportremovedrequirements(repo):
    """Obtain requirements that can be removed during an upgrade.

    If an upgrade were to create a repository that dropped a requirement,
    the dropped requirement must appear in the returned set for the upgrade
    to be allowed.
    """
    droppable = {
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
        requirements.DIRSTATE_TRACKED_HINT_V1,
        requirements.DIRSTATE_V2_REQUIREMENT,
    }
    # compression requirements are only droppable when the matching engine
    # is actually usable in this installation
    for engine_name in compression.compengines:
        engine = compression.compengines[engine_name]
        if engine.available() and engine.revlogheader():
            droppable.add(b'exp-compression-%s' % engine_name)
            if engine.name() == b'zstd':
                droppable.add(b'revlog-compression-zstd')
    return droppable
1038
1038
1039
1039
def supporteddestrequirements(repo):
    """Obtain requirements that upgrade supports in the destination.

    If the result of the upgrade would have requirements not in this set,
    the upgrade is disallowed.

    Extensions should monkeypatch this to add their custom requirements.
    """
    allowed = {
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.DIRSTATE_TRACKED_HINT_V1,
        requirements.DIRSTATE_V2_REQUIREMENT,
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,  # allowed in case of downgrade
        requirements.REVLOGV2_REQUIREMENT,
        requirements.SHARED_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.STORE_REQUIREMENT,
        requirements.TREEMANIFEST_REQUIREMENT,
        requirements.NARROW_REQUIREMENT,
    }
    # compression requirements are only allowed when the matching engine
    # is actually usable in this installation
    for engine_name in compression.compengines:
        engine = compression.compengines[engine_name]
        if engine.available() and engine.revlogheader():
            allowed.add(b'exp-compression-%s' % engine_name)
            if engine.name() == b'zstd':
                allowed.add(b'revlog-compression-zstd')
    return allowed
1073
1073
1074
1074
def allowednewrequirements(repo):
    """Obtain requirements that can be added to a repository during upgrade.

    This is used to disallow proposed requirements from being added when
    they weren't present before.

    We use a list of allowed requirement additions instead of a list of known
    bad additions because the whitelist approach is safer and will prevent
    future, unknown requirements from accidentally being added.
    """
    addable = {
        requirements.DOTENCODE_REQUIREMENT,
        requirements.FNCACHE_REQUIREMENT,
        requirements.GENERALDELTA_REQUIREMENT,
        requirements.SPARSEREVLOG_REQUIREMENT,
        requirements.COPIESSDC_REQUIREMENT,
        requirements.NODEMAP_REQUIREMENT,
        requirements.SHARESAFE_REQUIREMENT,
        requirements.REVLOGV1_REQUIREMENT,
        requirements.REVLOGV2_REQUIREMENT,
        requirements.CHANGELOGV2_REQUIREMENT,
        requirements.DIRSTATE_TRACKED_HINT_V1,
        requirements.DIRSTATE_V2_REQUIREMENT,
    }
    # compression requirements are only addable when the matching engine
    # is actually usable in this installation
    for engine_name in compression.compengines:
        engine = compression.compengines[engine_name]
        if engine.available() and engine.revlogheader():
            addable.add(b'exp-compression-%s' % engine_name)
            if engine.name() == b'zstd':
                addable.add(b'revlog-compression-zstd')
    return addable
1106
1106
1107
1107
def check_requirements_changes(repo, new_reqs):
    """Validate the requirement changes implied by ``new_reqs``.

    Raises ``error.Abort`` when a requirement would be removed that cannot
    be, when an unsupported requirement would be added, or when the
    destination requirements are unsupported.
    """
    old_reqs = repo.requirements
    check_revlog_version(repo.requirements)

    unremovable = old_reqs - new_reqs - supportremovedrequirements(repo)
    if unremovable:
        msg = _(b'cannot upgrade repository; requirement would be removed: %s')
        raise error.Abort(msg % b', '.join(sorted(unremovable)))

    unaddable = new_reqs - old_reqs - allowednewrequirements(repo)
    if unaddable:
        m = _(b'cannot upgrade repository; do not support adding requirement: ')
        raise error.Abort(m + b', '.join(sorted(unaddable)))

    unsupported = new_reqs - supporteddestrequirements(repo)
    if unsupported:
        msg = _(
            b'cannot upgrade repository; do not support destination '
            b'requirement: %s'
        )
        raise error.Abort(msg % b', '.join(sorted(unsupported)))
General Comments 0
You need to be logged in to leave comments. Login now