core: migrate uses of hashlib.sha1 to hashutil.sha1...
Augie Fackler
r44512:4ebd162f default draft

The requested changes are too big and content was truncated; only part of the diff is shown below.

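In short, the commit swaps the standard-library hashlib.sha1 constructor for the project's hashutil.sha1 wrapper wherever a SHA-1 object is built, leaving the surrounding call sites unchanged. A minimal before/after sketch of the pattern, assuming mercurial.utils.hashutil is importable as the new import blocks in the diff indicate (the payload value is illustrative only):

    # before: direct use of the standard library
    # import hashlib
    # digest = hashlib.sha1(b'some payload').hexdigest()

    # after: route SHA-1 construction through the hashutil wrapper,
    # which returns a hashlib-compatible hash object
    from mercurial.utils import hashutil
    digest = hashutil.sha1(b'some payload').hexdigest()
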
@@ -1,738 +1,738 @@ chgserver.py
1 # chgserver.py - command server extension for cHg
1 # chgserver.py - command server extension for cHg
2 #
2 #
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """command server extension for cHg
8 """command server extension for cHg
9
9
10 'S' channel (read/write)
10 'S' channel (read/write)
11 propagate ui.system() request to client
11 propagate ui.system() request to client
12
12
13 'attachio' command
13 'attachio' command
14 attach client's stdio passed by sendmsg()
14 attach client's stdio passed by sendmsg()
15
15
16 'chdir' command
16 'chdir' command
17 change current directory
17 change current directory
18
18
19 'setenv' command
19 'setenv' command
20 replace os.environ completely
20 replace os.environ completely
21
21
22 'setumask' command (DEPRECATED)
22 'setumask' command (DEPRECATED)
23 'setumask2' command
23 'setumask2' command
24 set umask
24 set umask
25
25
26 'validate' command
26 'validate' command
27 reload the config and check if the server is up to date
27 reload the config and check if the server is up to date
28
28
29 Config
29 Config
30 ------
30 ------
31
31
32 ::
32 ::
33
33
34 [chgserver]
34 [chgserver]
35 # how long (in seconds) should an idle chg server exit
35 # how long (in seconds) should an idle chg server exit
36 idletimeout = 3600
36 idletimeout = 3600
37
37
38 # whether to skip config or env change checks
38 # whether to skip config or env change checks
39 skiphash = False
39 skiphash = False
40 """
40 """
41
41
42 from __future__ import absolute_import
42 from __future__ import absolute_import
43
43
44 import hashlib
45 import inspect
44 import inspect
46 import os
45 import os
47 import re
46 import re
48 import socket
47 import socket
49 import stat
48 import stat
50 import struct
49 import struct
51 import time
50 import time
52
51
53 from .i18n import _
52 from .i18n import _
54 from .pycompat import (
53 from .pycompat import (
55 getattr,
54 getattr,
56 setattr,
55 setattr,
57 )
56 )
58
57
59 from . import (
58 from . import (
60 commandserver,
59 commandserver,
61 encoding,
60 encoding,
62 error,
61 error,
63 extensions,
62 extensions,
64 node,
63 node,
65 pycompat,
64 pycompat,
66 util,
65 util,
67 )
66 )
68
67
69 from .utils import (
68 from .utils import (
69 hashutil,
70 procutil,
70 procutil,
71 stringutil,
71 stringutil,
72 )
72 )
73
73
74
74
75 def _hashlist(items):
75 def _hashlist(items):
76 """return sha1 hexdigest for a list"""
76 """return sha1 hexdigest for a list"""
77 return node.hex(hashlib.sha1(stringutil.pprint(items)).digest())
77 return node.hex(hashutil.sha1(stringutil.pprint(items)).digest())
78
78
79
79
80 # sensitive config sections affecting confighash
80 # sensitive config sections affecting confighash
81 _configsections = [
81 _configsections = [
82 b'alias', # affects global state commands.table
82 b'alias', # affects global state commands.table
83 b'eol', # uses setconfig('eol', ...)
83 b'eol', # uses setconfig('eol', ...)
84 b'extdiff', # uisetup will register new commands
84 b'extdiff', # uisetup will register new commands
85 b'extensions',
85 b'extensions',
86 ]
86 ]
87
87
88 _configsectionitems = [
88 _configsectionitems = [
89 (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup
89 (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup
90 ]
90 ]
91
91
92 # sensitive environment variables affecting confighash
92 # sensitive environment variables affecting confighash
93 _envre = re.compile(
93 _envre = re.compile(
94 br'''\A(?:
94 br'''\A(?:
95 CHGHG
95 CHGHG
96 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
96 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
97 |HG(?:ENCODING|PLAIN).*
97 |HG(?:ENCODING|PLAIN).*
98 |LANG(?:UAGE)?
98 |LANG(?:UAGE)?
99 |LC_.*
99 |LC_.*
100 |LD_.*
100 |LD_.*
101 |PATH
101 |PATH
102 |PYTHON.*
102 |PYTHON.*
103 |TERM(?:INFO)?
103 |TERM(?:INFO)?
104 |TZ
104 |TZ
105 )\Z''',
105 )\Z''',
106 re.X,
106 re.X,
107 )
107 )
108
108
109
109
110 def _confighash(ui):
110 def _confighash(ui):
111 """return a quick hash for detecting config/env changes
111 """return a quick hash for detecting config/env changes
112
112
113 confighash is the hash of sensitive config items and environment variables.
113 confighash is the hash of sensitive config items and environment variables.
114
114
115 for chgserver, it is designed that once confighash changes, the server is
115 for chgserver, it is designed that once confighash changes, the server is
116 not qualified to serve its client and should redirect the client to a new
116 not qualified to serve its client and should redirect the client to a new
117 server. different from mtimehash, confighash change will not mark the
117 server. different from mtimehash, confighash change will not mark the
118 server outdated and exit since the user can have different configs at the
118 server outdated and exit since the user can have different configs at the
119 same time.
119 same time.
120 """
120 """
121 sectionitems = []
121 sectionitems = []
122 for section in _configsections:
122 for section in _configsections:
123 sectionitems.append(ui.configitems(section))
123 sectionitems.append(ui.configitems(section))
124 for section, item in _configsectionitems:
124 for section, item in _configsectionitems:
125 sectionitems.append(ui.config(section, item))
125 sectionitems.append(ui.config(section, item))
126 sectionhash = _hashlist(sectionitems)
126 sectionhash = _hashlist(sectionitems)
127 # If $CHGHG is set, the change to $HG should not trigger a new chg server
127 # If $CHGHG is set, the change to $HG should not trigger a new chg server
128 if b'CHGHG' in encoding.environ:
128 if b'CHGHG' in encoding.environ:
129 ignored = {b'HG'}
129 ignored = {b'HG'}
130 else:
130 else:
131 ignored = set()
131 ignored = set()
132 envitems = [
132 envitems = [
133 (k, v)
133 (k, v)
134 for k, v in pycompat.iteritems(encoding.environ)
134 for k, v in pycompat.iteritems(encoding.environ)
135 if _envre.match(k) and k not in ignored
135 if _envre.match(k) and k not in ignored
136 ]
136 ]
137 envhash = _hashlist(sorted(envitems))
137 envhash = _hashlist(sorted(envitems))
138 return sectionhash[:6] + envhash[:6]
138 return sectionhash[:6] + envhash[:6]
139
139
140
140
141 def _getmtimepaths(ui):
141 def _getmtimepaths(ui):
142 """get a list of paths that should be checked to detect change
142 """get a list of paths that should be checked to detect change
143
143
144 The list will include:
144 The list will include:
145 - extensions (will not cover all files for complex extensions)
145 - extensions (will not cover all files for complex extensions)
146 - mercurial/__version__.py
146 - mercurial/__version__.py
147 - python binary
147 - python binary
148 """
148 """
149 modules = [m for n, m in extensions.extensions(ui)]
149 modules = [m for n, m in extensions.extensions(ui)]
150 try:
150 try:
151 from . import __version__
151 from . import __version__
152
152
153 modules.append(__version__)
153 modules.append(__version__)
154 except ImportError:
154 except ImportError:
155 pass
155 pass
156 files = []
156 files = []
157 if pycompat.sysexecutable:
157 if pycompat.sysexecutable:
158 files.append(pycompat.sysexecutable)
158 files.append(pycompat.sysexecutable)
159 for m in modules:
159 for m in modules:
160 try:
160 try:
161 files.append(pycompat.fsencode(inspect.getabsfile(m)))
161 files.append(pycompat.fsencode(inspect.getabsfile(m)))
162 except TypeError:
162 except TypeError:
163 pass
163 pass
164 return sorted(set(files))
164 return sorted(set(files))
165
165
166
166
167 def _mtimehash(paths):
167 def _mtimehash(paths):
168 """return a quick hash for detecting file changes
168 """return a quick hash for detecting file changes
169
169
170 mtimehash calls stat on given paths and calculate a hash based on size and
170 mtimehash calls stat on given paths and calculate a hash based on size and
171 mtime of each file. mtimehash does not read file content because reading is
171 mtime of each file. mtimehash does not read file content because reading is
172 expensive. therefore it's not 100% reliable for detecting content changes.
172 expensive. therefore it's not 100% reliable for detecting content changes.
173 it's possible to return different hashes for same file contents.
173 it's possible to return different hashes for same file contents.
174 it's also possible to return a same hash for different file contents for
174 it's also possible to return a same hash for different file contents for
175 some carefully crafted situation.
175 some carefully crafted situation.
176
176
177 for chgserver, it is designed that once mtimehash changes, the server is
177 for chgserver, it is designed that once mtimehash changes, the server is
178 considered outdated immediately and should no longer provide service.
178 considered outdated immediately and should no longer provide service.
179
179
180 mtimehash is not included in confighash because we only know the paths of
180 mtimehash is not included in confighash because we only know the paths of
181 extensions after importing them (there is imp.find_module but that faces
181 extensions after importing them (there is imp.find_module but that faces
182 race conditions). We need to calculate confighash without importing.
182 race conditions). We need to calculate confighash without importing.
183 """
183 """
184
184
185 def trystat(path):
185 def trystat(path):
186 try:
186 try:
187 st = os.stat(path)
187 st = os.stat(path)
188 return (st[stat.ST_MTIME], st.st_size)
188 return (st[stat.ST_MTIME], st.st_size)
189 except OSError:
189 except OSError:
190 # could be ENOENT, EPERM etc. not fatal in any case
190 # could be ENOENT, EPERM etc. not fatal in any case
191 pass
191 pass
192
192
193 return _hashlist(pycompat.maplist(trystat, paths))[:12]
193 return _hashlist(pycompat.maplist(trystat, paths))[:12]
194
194
195
195
196 class hashstate(object):
196 class hashstate(object):
197 """a structure storing confighash, mtimehash, paths used for mtimehash"""
197 """a structure storing confighash, mtimehash, paths used for mtimehash"""
198
198
199 def __init__(self, confighash, mtimehash, mtimepaths):
199 def __init__(self, confighash, mtimehash, mtimepaths):
200 self.confighash = confighash
200 self.confighash = confighash
201 self.mtimehash = mtimehash
201 self.mtimehash = mtimehash
202 self.mtimepaths = mtimepaths
202 self.mtimepaths = mtimepaths
203
203
204 @staticmethod
204 @staticmethod
205 def fromui(ui, mtimepaths=None):
205 def fromui(ui, mtimepaths=None):
206 if mtimepaths is None:
206 if mtimepaths is None:
207 mtimepaths = _getmtimepaths(ui)
207 mtimepaths = _getmtimepaths(ui)
208 confighash = _confighash(ui)
208 confighash = _confighash(ui)
209 mtimehash = _mtimehash(mtimepaths)
209 mtimehash = _mtimehash(mtimepaths)
210 ui.log(
210 ui.log(
211 b'cmdserver',
211 b'cmdserver',
212 b'confighash = %s mtimehash = %s\n',
212 b'confighash = %s mtimehash = %s\n',
213 confighash,
213 confighash,
214 mtimehash,
214 mtimehash,
215 )
215 )
216 return hashstate(confighash, mtimehash, mtimepaths)
216 return hashstate(confighash, mtimehash, mtimepaths)
217
217
218
218
219 def _newchgui(srcui, csystem, attachio):
219 def _newchgui(srcui, csystem, attachio):
220 class chgui(srcui.__class__):
220 class chgui(srcui.__class__):
221 def __init__(self, src=None):
221 def __init__(self, src=None):
222 super(chgui, self).__init__(src)
222 super(chgui, self).__init__(src)
223 if src:
223 if src:
224 self._csystem = getattr(src, '_csystem', csystem)
224 self._csystem = getattr(src, '_csystem', csystem)
225 else:
225 else:
226 self._csystem = csystem
226 self._csystem = csystem
227
227
228 def _runsystem(self, cmd, environ, cwd, out):
228 def _runsystem(self, cmd, environ, cwd, out):
229 # fallback to the original system method if
229 # fallback to the original system method if
230 # a. the output stream is not stdout (e.g. stderr, cStringIO),
230 # a. the output stream is not stdout (e.g. stderr, cStringIO),
231 # b. or stdout is redirected by protectfinout(),
231 # b. or stdout is redirected by protectfinout(),
232 # because the chg client is not aware of these situations and
232 # because the chg client is not aware of these situations and
233 # will behave differently (i.e. write to stdout).
233 # will behave differently (i.e. write to stdout).
234 if (
234 if (
235 out is not self.fout
235 out is not self.fout
236 or not util.safehasattr(self.fout, b'fileno')
236 or not util.safehasattr(self.fout, b'fileno')
237 or self.fout.fileno() != procutil.stdout.fileno()
237 or self.fout.fileno() != procutil.stdout.fileno()
238 or self._finoutredirected
238 or self._finoutredirected
239 ):
239 ):
240 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
240 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
241 self.flush()
241 self.flush()
242 return self._csystem(cmd, procutil.shellenviron(environ), cwd)
242 return self._csystem(cmd, procutil.shellenviron(environ), cwd)
243
243
244 def _runpager(self, cmd, env=None):
244 def _runpager(self, cmd, env=None):
245 self._csystem(
245 self._csystem(
246 cmd,
246 cmd,
247 procutil.shellenviron(env),
247 procutil.shellenviron(env),
248 type=b'pager',
248 type=b'pager',
249 cmdtable={b'attachio': attachio},
249 cmdtable={b'attachio': attachio},
250 )
250 )
251 return True
251 return True
252
252
253 return chgui(srcui)
253 return chgui(srcui)
254
254
255
255
256 def _loadnewui(srcui, args, cdebug):
256 def _loadnewui(srcui, args, cdebug):
257 from . import dispatch # avoid cycle
257 from . import dispatch # avoid cycle
258
258
259 newui = srcui.__class__.load()
259 newui = srcui.__class__.load()
260 for a in [b'fin', b'fout', b'ferr', b'environ']:
260 for a in [b'fin', b'fout', b'ferr', b'environ']:
261 setattr(newui, a, getattr(srcui, a))
261 setattr(newui, a, getattr(srcui, a))
262 if util.safehasattr(srcui, b'_csystem'):
262 if util.safehasattr(srcui, b'_csystem'):
263 newui._csystem = srcui._csystem
263 newui._csystem = srcui._csystem
264
264
265 # command line args
265 # command line args
266 options = dispatch._earlyparseopts(newui, args)
266 options = dispatch._earlyparseopts(newui, args)
267 dispatch._parseconfig(newui, options[b'config'])
267 dispatch._parseconfig(newui, options[b'config'])
268
268
269 # stolen from tortoisehg.util.copydynamicconfig()
269 # stolen from tortoisehg.util.copydynamicconfig()
270 for section, name, value in srcui.walkconfig():
270 for section, name, value in srcui.walkconfig():
271 source = srcui.configsource(section, name)
271 source = srcui.configsource(section, name)
272 if b':' in source or source == b'--config' or source.startswith(b'$'):
272 if b':' in source or source == b'--config' or source.startswith(b'$'):
273 # path:line or command line, or environ
273 # path:line or command line, or environ
274 continue
274 continue
275 newui.setconfig(section, name, value, source)
275 newui.setconfig(section, name, value, source)
276
276
277 # load wd and repo config, copied from dispatch.py
277 # load wd and repo config, copied from dispatch.py
278 cwd = options[b'cwd']
278 cwd = options[b'cwd']
279 cwd = cwd and os.path.realpath(cwd) or None
279 cwd = cwd and os.path.realpath(cwd) or None
280 rpath = options[b'repository']
280 rpath = options[b'repository']
281 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
281 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
282
282
283 extensions.populateui(newui)
283 extensions.populateui(newui)
284 commandserver.setuplogging(newui, fp=cdebug)
284 commandserver.setuplogging(newui, fp=cdebug)
285 if newui is not newlui:
285 if newui is not newlui:
286 extensions.populateui(newlui)
286 extensions.populateui(newlui)
287 commandserver.setuplogging(newlui, fp=cdebug)
287 commandserver.setuplogging(newlui, fp=cdebug)
288
288
289 return (newui, newlui)
289 return (newui, newlui)
290
290
291
291
292 class channeledsystem(object):
292 class channeledsystem(object):
293 """Propagate ui.system() request in the following format:
293 """Propagate ui.system() request in the following format:
294
294
295 payload length (unsigned int),
295 payload length (unsigned int),
296 type, '\0',
296 type, '\0',
297 cmd, '\0',
297 cmd, '\0',
298 cwd, '\0',
298 cwd, '\0',
299 envkey, '=', val, '\0',
299 envkey, '=', val, '\0',
300 ...
300 ...
301 envkey, '=', val
301 envkey, '=', val
302
302
303 if type == 'system', waits for:
303 if type == 'system', waits for:
304
304
305 exitcode length (unsigned int),
305 exitcode length (unsigned int),
306 exitcode (int)
306 exitcode (int)
307
307
308 if type == 'pager', repetitively waits for a command name ending with '\n'
308 if type == 'pager', repetitively waits for a command name ending with '\n'
309 and executes it defined by cmdtable, or exits the loop if the command name
309 and executes it defined by cmdtable, or exits the loop if the command name
310 is empty.
310 is empty.
311 """
311 """
312
312
313 def __init__(self, in_, out, channel):
313 def __init__(self, in_, out, channel):
314 self.in_ = in_
314 self.in_ = in_
315 self.out = out
315 self.out = out
316 self.channel = channel
316 self.channel = channel
317
317
318 def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
318 def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
319 args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or b'.')]
319 args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or b'.')]
320 args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
320 args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
321 data = b'\0'.join(args)
321 data = b'\0'.join(args)
322 self.out.write(struct.pack(b'>cI', self.channel, len(data)))
322 self.out.write(struct.pack(b'>cI', self.channel, len(data)))
323 self.out.write(data)
323 self.out.write(data)
324 self.out.flush()
324 self.out.flush()
325
325
326 if type == b'system':
326 if type == b'system':
327 length = self.in_.read(4)
327 length = self.in_.read(4)
328 (length,) = struct.unpack(b'>I', length)
328 (length,) = struct.unpack(b'>I', length)
329 if length != 4:
329 if length != 4:
330 raise error.Abort(_(b'invalid response'))
330 raise error.Abort(_(b'invalid response'))
331 (rc,) = struct.unpack(b'>i', self.in_.read(4))
331 (rc,) = struct.unpack(b'>i', self.in_.read(4))
332 return rc
332 return rc
333 elif type == b'pager':
333 elif type == b'pager':
334 while True:
334 while True:
335 cmd = self.in_.readline()[:-1]
335 cmd = self.in_.readline()[:-1]
336 if not cmd:
336 if not cmd:
337 break
337 break
338 if cmdtable and cmd in cmdtable:
338 if cmdtable and cmd in cmdtable:
339 cmdtable[cmd]()
339 cmdtable[cmd]()
340 else:
340 else:
341 raise error.Abort(_(b'unexpected command: %s') % cmd)
341 raise error.Abort(_(b'unexpected command: %s') % cmd)
342 else:
342 else:
343 raise error.ProgrammingError(b'invalid S channel type: %s' % type)
343 raise error.ProgrammingError(b'invalid S channel type: %s' % type)
344
344
345
345
346 _iochannels = [
346 _iochannels = [
347 # server.ch, ui.fp, mode
347 # server.ch, ui.fp, mode
348 (b'cin', b'fin', 'rb'),
348 (b'cin', b'fin', 'rb'),
349 (b'cout', b'fout', 'wb'),
349 (b'cout', b'fout', 'wb'),
350 (b'cerr', b'ferr', 'wb'),
350 (b'cerr', b'ferr', 'wb'),
351 ]
351 ]
352
352
353
353
354 class chgcmdserver(commandserver.server):
354 class chgcmdserver(commandserver.server):
355 def __init__(
355 def __init__(
356 self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
356 self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
357 ):
357 ):
358 super(chgcmdserver, self).__init__(
358 super(chgcmdserver, self).__init__(
359 _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
359 _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
360 repo,
360 repo,
361 fin,
361 fin,
362 fout,
362 fout,
363 prereposetups,
363 prereposetups,
364 )
364 )
365 self.clientsock = sock
365 self.clientsock = sock
366 self._ioattached = False
366 self._ioattached = False
367 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
367 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
368 self.hashstate = hashstate
368 self.hashstate = hashstate
369 self.baseaddress = baseaddress
369 self.baseaddress = baseaddress
370 if hashstate is not None:
370 if hashstate is not None:
371 self.capabilities = self.capabilities.copy()
371 self.capabilities = self.capabilities.copy()
372 self.capabilities[b'validate'] = chgcmdserver.validate
372 self.capabilities[b'validate'] = chgcmdserver.validate
373
373
374 def cleanup(self):
374 def cleanup(self):
375 super(chgcmdserver, self).cleanup()
375 super(chgcmdserver, self).cleanup()
376 # dispatch._runcatch() does not flush outputs if exception is not
376 # dispatch._runcatch() does not flush outputs if exception is not
377 # handled by dispatch._dispatch()
377 # handled by dispatch._dispatch()
378 self.ui.flush()
378 self.ui.flush()
379 self._restoreio()
379 self._restoreio()
380 self._ioattached = False
380 self._ioattached = False
381
381
382 def attachio(self):
382 def attachio(self):
383 """Attach to client's stdio passed via unix domain socket; all
383 """Attach to client's stdio passed via unix domain socket; all
384 channels except cresult will no longer be used
384 channels except cresult will no longer be used
385 """
385 """
386 # tell client to sendmsg() with 1-byte payload, which makes it
386 # tell client to sendmsg() with 1-byte payload, which makes it
387 # distinctive from "attachio\n" command consumed by client.read()
387 # distinctive from "attachio\n" command consumed by client.read()
388 self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
388 self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
389 clientfds = util.recvfds(self.clientsock.fileno())
389 clientfds = util.recvfds(self.clientsock.fileno())
390 self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)
390 self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)
391
391
392 ui = self.ui
392 ui = self.ui
393 ui.flush()
393 ui.flush()
394 self._saveio()
394 self._saveio()
395 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
395 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
396 assert fd > 0
396 assert fd > 0
397 fp = getattr(ui, fn)
397 fp = getattr(ui, fn)
398 os.dup2(fd, fp.fileno())
398 os.dup2(fd, fp.fileno())
399 os.close(fd)
399 os.close(fd)
400 if self._ioattached:
400 if self._ioattached:
401 continue
401 continue
402 # reset buffering mode when client is first attached. as we want
402 # reset buffering mode when client is first attached. as we want
403 # to see output immediately on pager, the mode stays unchanged
403 # to see output immediately on pager, the mode stays unchanged
404 # when client re-attached. ferr is unchanged because it should
404 # when client re-attached. ferr is unchanged because it should
405 # be unbuffered no matter if it is a tty or not.
405 # be unbuffered no matter if it is a tty or not.
406 if fn == b'ferr':
406 if fn == b'ferr':
407 newfp = fp
407 newfp = fp
408 else:
408 else:
409 # make it line buffered explicitly because the default is
409 # make it line buffered explicitly because the default is
410 # decided on first write(), where fout could be a pager.
410 # decided on first write(), where fout could be a pager.
411 if fp.isatty():
411 if fp.isatty():
412 bufsize = 1 # line buffered
412 bufsize = 1 # line buffered
413 else:
413 else:
414 bufsize = -1 # system default
414 bufsize = -1 # system default
415 newfp = os.fdopen(fp.fileno(), mode, bufsize)
415 newfp = os.fdopen(fp.fileno(), mode, bufsize)
416 setattr(ui, fn, newfp)
416 setattr(ui, fn, newfp)
417 setattr(self, cn, newfp)
417 setattr(self, cn, newfp)
418
418
419 self._ioattached = True
419 self._ioattached = True
420 self.cresult.write(struct.pack(b'>i', len(clientfds)))
420 self.cresult.write(struct.pack(b'>i', len(clientfds)))
421
421
422 def _saveio(self):
422 def _saveio(self):
423 if self._oldios:
423 if self._oldios:
424 return
424 return
425 ui = self.ui
425 ui = self.ui
426 for cn, fn, _mode in _iochannels:
426 for cn, fn, _mode in _iochannels:
427 ch = getattr(self, cn)
427 ch = getattr(self, cn)
428 fp = getattr(ui, fn)
428 fp = getattr(ui, fn)
429 fd = os.dup(fp.fileno())
429 fd = os.dup(fp.fileno())
430 self._oldios.append((ch, fp, fd))
430 self._oldios.append((ch, fp, fd))
431
431
432 def _restoreio(self):
432 def _restoreio(self):
433 ui = self.ui
433 ui = self.ui
434 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
434 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
435 newfp = getattr(ui, fn)
435 newfp = getattr(ui, fn)
436 # close newfp while it's associated with client; otherwise it
436 # close newfp while it's associated with client; otherwise it
437 # would be closed when newfp is deleted
437 # would be closed when newfp is deleted
438 if newfp is not fp:
438 if newfp is not fp:
439 newfp.close()
439 newfp.close()
440 # restore original fd: fp is open again
440 # restore original fd: fp is open again
441 os.dup2(fd, fp.fileno())
441 os.dup2(fd, fp.fileno())
442 os.close(fd)
442 os.close(fd)
443 setattr(self, cn, ch)
443 setattr(self, cn, ch)
444 setattr(ui, fn, fp)
444 setattr(ui, fn, fp)
445 del self._oldios[:]
445 del self._oldios[:]
446
446
447 def validate(self):
447 def validate(self):
448 """Reload the config and check if the server is up to date
448 """Reload the config and check if the server is up to date
449
449
450 Read a list of '\0' separated arguments.
450 Read a list of '\0' separated arguments.
451 Write a non-empty list of '\0' separated instruction strings or '\0'
451 Write a non-empty list of '\0' separated instruction strings or '\0'
452 if the list is empty.
452 if the list is empty.
453 An instruction string could be either:
453 An instruction string could be either:
454 - "unlink $path", the client should unlink the path to stop the
454 - "unlink $path", the client should unlink the path to stop the
455 outdated server.
455 outdated server.
456 - "redirect $path", the client should attempt to connect to $path
456 - "redirect $path", the client should attempt to connect to $path
457 first. If it does not work, start a new server. It implies
457 first. If it does not work, start a new server. It implies
458 "reconnect".
458 "reconnect".
459 - "exit $n", the client should exit directly with code n.
459 - "exit $n", the client should exit directly with code n.
460 This may happen if we cannot parse the config.
460 This may happen if we cannot parse the config.
461 - "reconnect", the client should close the connection and
461 - "reconnect", the client should close the connection and
462 reconnect.
462 reconnect.
463 If neither "reconnect" nor "redirect" is included in the instruction
463 If neither "reconnect" nor "redirect" is included in the instruction
464 list, the client can continue with this server after completing all
464 list, the client can continue with this server after completing all
465 the instructions.
465 the instructions.
466 """
466 """
467 from . import dispatch # avoid cycle
467 from . import dispatch # avoid cycle
468
468
469 args = self._readlist()
469 args = self._readlist()
470 try:
470 try:
471 self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
471 self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
472 except error.ParseError as inst:
472 except error.ParseError as inst:
473 dispatch._formatparse(self.ui.warn, inst)
473 dispatch._formatparse(self.ui.warn, inst)
474 self.ui.flush()
474 self.ui.flush()
475 self.cresult.write(b'exit 255')
475 self.cresult.write(b'exit 255')
476 return
476 return
477 except error.Abort as inst:
477 except error.Abort as inst:
478 self.ui.error(_(b"abort: %s\n") % inst)
478 self.ui.error(_(b"abort: %s\n") % inst)
479 if inst.hint:
479 if inst.hint:
480 self.ui.error(_(b"(%s)\n") % inst.hint)
480 self.ui.error(_(b"(%s)\n") % inst.hint)
481 self.ui.flush()
481 self.ui.flush()
482 self.cresult.write(b'exit 255')
482 self.cresult.write(b'exit 255')
483 return
483 return
484 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
484 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
485 insts = []
485 insts = []
486 if newhash.mtimehash != self.hashstate.mtimehash:
486 if newhash.mtimehash != self.hashstate.mtimehash:
487 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
487 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
488 insts.append(b'unlink %s' % addr)
488 insts.append(b'unlink %s' % addr)
489 # mtimehash is empty if one or more extensions fail to load.
489 # mtimehash is empty if one or more extensions fail to load.
490 # to be compatible with hg, still serve the client this time.
490 # to be compatible with hg, still serve the client this time.
491 if self.hashstate.mtimehash:
491 if self.hashstate.mtimehash:
492 insts.append(b'reconnect')
492 insts.append(b'reconnect')
493 if newhash.confighash != self.hashstate.confighash:
493 if newhash.confighash != self.hashstate.confighash:
494 addr = _hashaddress(self.baseaddress, newhash.confighash)
494 addr = _hashaddress(self.baseaddress, newhash.confighash)
495 insts.append(b'redirect %s' % addr)
495 insts.append(b'redirect %s' % addr)
496 self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
496 self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
497 self.cresult.write(b'\0'.join(insts) or b'\0')
497 self.cresult.write(b'\0'.join(insts) or b'\0')
498
498
499 def chdir(self):
499 def chdir(self):
500 """Change current directory
500 """Change current directory
501
501
502 Note that the behavior of --cwd option is bit different from this.
502 Note that the behavior of --cwd option is bit different from this.
503 It does not affect --config parameter.
503 It does not affect --config parameter.
504 """
504 """
505 path = self._readstr()
505 path = self._readstr()
506 if not path:
506 if not path:
507 return
507 return
508 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
508 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
509 os.chdir(path)
509 os.chdir(path)
510
510
511 def setumask(self):
511 def setumask(self):
512 """Change umask (DEPRECATED)"""
512 """Change umask (DEPRECATED)"""
513 # BUG: this does not follow the message frame structure, but kept for
513 # BUG: this does not follow the message frame structure, but kept for
514 # backward compatibility with old chg clients for some time
514 # backward compatibility with old chg clients for some time
515 self._setumask(self._read(4))
515 self._setumask(self._read(4))
516
516
517 def setumask2(self):
517 def setumask2(self):
518 """Change umask"""
518 """Change umask"""
519 data = self._readstr()
519 data = self._readstr()
520 if len(data) != 4:
520 if len(data) != 4:
521 raise ValueError(b'invalid mask length in setumask2 request')
521 raise ValueError(b'invalid mask length in setumask2 request')
522 self._setumask(data)
522 self._setumask(data)
523
523
524 def _setumask(self, data):
524 def _setumask(self, data):
525 mask = struct.unpack(b'>I', data)[0]
525 mask = struct.unpack(b'>I', data)[0]
526 self.ui.log(b'chgserver', b'setumask %r\n', mask)
526 self.ui.log(b'chgserver', b'setumask %r\n', mask)
527 os.umask(mask)
527 os.umask(mask)
528
528
529 def runcommand(self):
529 def runcommand(self):
530 # pager may be attached within the runcommand session, which should
530 # pager may be attached within the runcommand session, which should
531 # be detached at the end of the session. otherwise the pager wouldn't
531 # be detached at the end of the session. otherwise the pager wouldn't
532 # receive EOF.
532 # receive EOF.
533 globaloldios = self._oldios
533 globaloldios = self._oldios
534 self._oldios = []
534 self._oldios = []
535 try:
535 try:
536 return super(chgcmdserver, self).runcommand()
536 return super(chgcmdserver, self).runcommand()
537 finally:
537 finally:
538 self._restoreio()
538 self._restoreio()
539 self._oldios = globaloldios
539 self._oldios = globaloldios
540
540
541 def setenv(self):
541 def setenv(self):
542 """Clear and update os.environ
542 """Clear and update os.environ
543
543
544 Note that not all variables can make an effect on the running process.
544 Note that not all variables can make an effect on the running process.
545 """
545 """
546 l = self._readlist()
546 l = self._readlist()
547 try:
547 try:
548 newenv = dict(s.split(b'=', 1) for s in l)
548 newenv = dict(s.split(b'=', 1) for s in l)
549 except ValueError:
549 except ValueError:
550 raise ValueError(b'unexpected value in setenv request')
550 raise ValueError(b'unexpected value in setenv request')
551 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
551 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
552
552
553 # Python3 has some logic to "coerce" the C locale to a UTF-8 capable
553 # Python3 has some logic to "coerce" the C locale to a UTF-8 capable
554 # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of
554 # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of
555 # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be
555 # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be
556 # disabled with PYTHONCOERCECLOCALE=0 in the environment.
556 # disabled with PYTHONCOERCECLOCALE=0 in the environment.
557 #
557 #
558 # When fromui is called via _inithashstate, python has already set
558 # When fromui is called via _inithashstate, python has already set
559 # this, so that's in the environment right when we start up the hg
559 # this, so that's in the environment right when we start up the hg
560 # process. Then chg will call us and tell us to set the environment to
560 # process. Then chg will call us and tell us to set the environment to
561 # the one it has; this might NOT have LC_CTYPE, so we'll need to
561 # the one it has; this might NOT have LC_CTYPE, so we'll need to
562 # carry-forward the LC_CTYPE that was coerced in these situations.
562 # carry-forward the LC_CTYPE that was coerced in these situations.
563 #
563 #
564 # If this is not handled, we will fail config+env validation and fail
564 # If this is not handled, we will fail config+env validation and fail
565 # to start chg. If this is just ignored instead of carried forward, we
565 # to start chg. If this is just ignored instead of carried forward, we
566 # may have different behavior between chg and non-chg.
566 # may have different behavior between chg and non-chg.
567 if pycompat.ispy3:
567 if pycompat.ispy3:
568 # Rename for wordwrapping purposes
568 # Rename for wordwrapping purposes
569 oldenv = encoding.environ
569 oldenv = encoding.environ
570 if not any(
570 if not any(
571 e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv]
571 e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv]
572 ):
572 ):
573 keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
573 keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
574 old_keys = [k for k, v in oldenv.items() if k in keys and v]
574 old_keys = [k for k, v in oldenv.items() if k in keys and v]
575 new_keys = [k for k, v in newenv.items() if k in keys and v]
575 new_keys = [k for k, v in newenv.items() if k in keys and v]
576 # If the user's environment (from chg) doesn't have ANY of the
576 # If the user's environment (from chg) doesn't have ANY of the
577 # keys that python looks for, and the environment (from
577 # keys that python looks for, and the environment (from
578 # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8,
578 # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8,
579 # carry it forward.
579 # carry it forward.
580 if (
580 if (
581 not new_keys
581 not new_keys
582 and old_keys == [b'LC_CTYPE']
582 and old_keys == [b'LC_CTYPE']
583 and oldenv[b'LC_CTYPE'] == b'C.UTF-8'
583 and oldenv[b'LC_CTYPE'] == b'C.UTF-8'
584 ):
584 ):
585 newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE']
585 newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE']
586
586
587 encoding.environ.clear()
587 encoding.environ.clear()
588 encoding.environ.update(newenv)
588 encoding.environ.update(newenv)
589
589
590 capabilities = commandserver.server.capabilities.copy()
590 capabilities = commandserver.server.capabilities.copy()
591 capabilities.update(
591 capabilities.update(
592 {
592 {
593 b'attachio': attachio,
593 b'attachio': attachio,
594 b'chdir': chdir,
594 b'chdir': chdir,
595 b'runcommand': runcommand,
595 b'runcommand': runcommand,
596 b'setenv': setenv,
596 b'setenv': setenv,
597 b'setumask': setumask,
597 b'setumask': setumask,
598 b'setumask2': setumask2,
598 b'setumask2': setumask2,
599 }
599 }
600 )
600 )
601
601
602 if util.safehasattr(procutil, b'setprocname'):
602 if util.safehasattr(procutil, b'setprocname'):
603
603
604 def setprocname(self):
604 def setprocname(self):
605 """Change process title"""
605 """Change process title"""
606 name = self._readstr()
606 name = self._readstr()
607 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
607 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
608 procutil.setprocname(name)
608 procutil.setprocname(name)
609
609
610 capabilities[b'setprocname'] = setprocname
610 capabilities[b'setprocname'] = setprocname
611
611
612
612
613 def _tempaddress(address):
613 def _tempaddress(address):
614 return b'%s.%d.tmp' % (address, os.getpid())
614 return b'%s.%d.tmp' % (address, os.getpid())
615
615
616
616
617 def _hashaddress(address, hashstr):
617 def _hashaddress(address, hashstr):
618 # if the basename of address contains '.', use only the left part. this
618 # if the basename of address contains '.', use only the left part. this
619 # makes it possible for the client to pass 'server.tmp$PID' and follow by
619 # makes it possible for the client to pass 'server.tmp$PID' and follow by
620 # an atomic rename to avoid locking when spawning new servers.
620 # an atomic rename to avoid locking when spawning new servers.
621 dirname, basename = os.path.split(address)
621 dirname, basename = os.path.split(address)
622 basename = basename.split(b'.', 1)[0]
622 basename = basename.split(b'.', 1)[0]
623 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
623 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
624
624
625
625
626 class chgunixservicehandler(object):
626 class chgunixservicehandler(object):
627 """Set of operations for chg services"""
627 """Set of operations for chg services"""
628
628
629 pollinterval = 1 # [sec]
629 pollinterval = 1 # [sec]
630
630
631 def __init__(self, ui):
631 def __init__(self, ui):
632 self.ui = ui
632 self.ui = ui
633 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
633 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
634 self._lastactive = time.time()
634 self._lastactive = time.time()
635
635
636 def bindsocket(self, sock, address):
636 def bindsocket(self, sock, address):
637 self._inithashstate(address)
637 self._inithashstate(address)
638 self._checkextensions()
638 self._checkextensions()
639 self._bind(sock)
639 self._bind(sock)
640 self._createsymlink()
640 self._createsymlink()
641 # no "listening at" message should be printed to simulate hg behavior
641 # no "listening at" message should be printed to simulate hg behavior
642
642
643 def _inithashstate(self, address):
643 def _inithashstate(self, address):
644 self._baseaddress = address
644 self._baseaddress = address
645 if self.ui.configbool(b'chgserver', b'skiphash'):
645 if self.ui.configbool(b'chgserver', b'skiphash'):
646 self._hashstate = None
646 self._hashstate = None
647 self._realaddress = address
647 self._realaddress = address
648 return
648 return
649 self._hashstate = hashstate.fromui(self.ui)
649 self._hashstate = hashstate.fromui(self.ui)
650 self._realaddress = _hashaddress(address, self._hashstate.confighash)
650 self._realaddress = _hashaddress(address, self._hashstate.confighash)
651
651
652 def _checkextensions(self):
652 def _checkextensions(self):
653 if not self._hashstate:
653 if not self._hashstate:
654 return
654 return
655 if extensions.notloaded():
655 if extensions.notloaded():
656 # one or more extensions failed to load. mtimehash becomes
656 # one or more extensions failed to load. mtimehash becomes
657 # meaningless because we do not know the paths of those extensions.
657 # meaningless because we do not know the paths of those extensions.
658 # set mtimehash to an illegal hash value to invalidate the server.
658 # set mtimehash to an illegal hash value to invalidate the server.
659 self._hashstate.mtimehash = b''
659 self._hashstate.mtimehash = b''
660
660
661 def _bind(self, sock):
661 def _bind(self, sock):
662 # use a unique temp address so we can stat the file and do ownership
662 # use a unique temp address so we can stat the file and do ownership
663 # check later
663 # check later
664 tempaddress = _tempaddress(self._realaddress)
664 tempaddress = _tempaddress(self._realaddress)
665 util.bindunixsocket(sock, tempaddress)
665 util.bindunixsocket(sock, tempaddress)
666 self._socketstat = os.stat(tempaddress)
666 self._socketstat = os.stat(tempaddress)
667 sock.listen(socket.SOMAXCONN)
667 sock.listen(socket.SOMAXCONN)
668 # rename will replace the old socket file if exists atomically. the
668 # rename will replace the old socket file if exists atomically. the
669 # old server will detect ownership change and exit.
669 # old server will detect ownership change and exit.
670 util.rename(tempaddress, self._realaddress)
670 util.rename(tempaddress, self._realaddress)
671
671
672 def _createsymlink(self):
672 def _createsymlink(self):
673 if self._baseaddress == self._realaddress:
673 if self._baseaddress == self._realaddress:
674 return
674 return
675 tempaddress = _tempaddress(self._baseaddress)
675 tempaddress = _tempaddress(self._baseaddress)
676 os.symlink(os.path.basename(self._realaddress), tempaddress)
676 os.symlink(os.path.basename(self._realaddress), tempaddress)
677 util.rename(tempaddress, self._baseaddress)
677 util.rename(tempaddress, self._baseaddress)
678
678
679 def _issocketowner(self):
679 def _issocketowner(self):
680 try:
680 try:
681 st = os.stat(self._realaddress)
681 st = os.stat(self._realaddress)
682 return (
682 return (
683 st.st_ino == self._socketstat.st_ino
683 st.st_ino == self._socketstat.st_ino
684 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
684 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
685 )
685 )
686 except OSError:
686 except OSError:
687 return False
687 return False
688
688
689 def unlinksocket(self, address):
689 def unlinksocket(self, address):
690 if not self._issocketowner():
690 if not self._issocketowner():
691 return
691 return
692 # it is possible to have a race condition here that we may
692 # it is possible to have a race condition here that we may
693 # remove another server's socket file. but that's okay
693 # remove another server's socket file. but that's okay
694 # since that server will detect and exit automatically and
694 # since that server will detect and exit automatically and
695 # the client will start a new server on demand.
695 # the client will start a new server on demand.
696 util.tryunlink(self._realaddress)
696 util.tryunlink(self._realaddress)
697
697
698 def shouldexit(self):
698 def shouldexit(self):
699 if not self._issocketowner():
699 if not self._issocketowner():
700 self.ui.log(
700 self.ui.log(
701 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
701 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
702 )
702 )
703 return True
703 return True
704 if time.time() - self._lastactive > self._idletimeout:
704 if time.time() - self._lastactive > self._idletimeout:
705 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
705 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
706 return True
706 return True
707 return False
707 return False
708
708
709 def newconnection(self):
709 def newconnection(self):
710 self._lastactive = time.time()
710 self._lastactive = time.time()
711
711
712 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
712 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
713 return chgcmdserver(
713 return chgcmdserver(
714 self.ui,
714 self.ui,
715 repo,
715 repo,
716 fin,
716 fin,
717 fout,
717 fout,
718 conn,
718 conn,
719 prereposetups,
719 prereposetups,
720 self._hashstate,
720 self._hashstate,
721 self._baseaddress,
721 self._baseaddress,
722 )
722 )
723
723
724
724
725 def chgunixservice(ui, repo, opts):
725 def chgunixservice(ui, repo, opts):
726 # CHGINTERNALMARK is set by chg client. It is an indication of things are
726 # CHGINTERNALMARK is set by chg client. It is an indication of things are
727 # started by chg so other code can do things accordingly, like disabling
727 # started by chg so other code can do things accordingly, like disabling
728 # demandimport or detecting chg client started by chg client. When executed
728 # demandimport or detecting chg client started by chg client. When executed
729 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
729 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
730 # environ cleaner.
730 # environ cleaner.
731 if b'CHGINTERNALMARK' in encoding.environ:
731 if b'CHGINTERNALMARK' in encoding.environ:
732 del encoding.environ[b'CHGINTERNALMARK']
732 del encoding.environ[b'CHGINTERNALMARK']
733
733
734 if repo:
734 if repo:
735 # one chgserver can serve multiple repos. drop repo information
735 # one chgserver can serve multiple repos. drop repo information
736 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
736 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
737 h = chgunixservicehandler(ui)
737 h = chgunixservicehandler(ui)
738 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
738 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
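
The only functional change in chgserver.py above is inside _hashlist(). A self-contained sketch of the rewritten helper, assuming the mercurial package (node, utils.stringutil, utils.hashutil) is importable; the sample list is illustrative only:

    from mercurial import node
    from mercurial.utils import hashutil, stringutil

    def _hashlist(items):
        # pretty-print the items to bytes, hash them through the
        # hashutil.sha1 wrapper, and hex-encode the digest, mirroring
        # the new chgserver.py line shown in the hunk above
        return node.hex(hashutil.sha1(stringutil.pprint(items)).digest())

    print(_hashlist([b'alias', b'extensions']))
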
@@ -1,3098 +1,3100 @@ exchange.py
1 # exchange.py - utility to exchange data between repos.
1 # exchange.py - utility to exchange data between repos.
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import hashlib
12
11
13 from .i18n import _
12 from .i18n import _
14 from .node import (
13 from .node import (
15 hex,
14 hex,
16 nullid,
15 nullid,
17 nullrev,
16 nullrev,
18 )
17 )
19 from .thirdparty import attr
18 from .thirdparty import attr
20 from . import (
19 from . import (
21 bookmarks as bookmod,
20 bookmarks as bookmod,
22 bundle2,
21 bundle2,
23 changegroup,
22 changegroup,
24 discovery,
23 discovery,
25 error,
24 error,
26 exchangev2,
25 exchangev2,
27 lock as lockmod,
26 lock as lockmod,
28 logexchange,
27 logexchange,
29 narrowspec,
28 narrowspec,
30 obsolete,
29 obsolete,
31 obsutil,
30 obsutil,
32 phases,
31 phases,
33 pushkey,
32 pushkey,
34 pycompat,
33 pycompat,
35 scmutil,
34 scmutil,
36 sslutil,
35 sslutil,
37 streamclone,
36 streamclone,
38 url as urlmod,
37 url as urlmod,
39 util,
38 util,
40 wireprototypes,
39 wireprototypes,
41 )
40 )
42 from .interfaces import repository
41 from .interfaces import repository
43 from .utils import stringutil
42 from .utils import (
43 hashutil,
44 stringutil,
45 )
44
46
45 urlerr = util.urlerr
47 urlerr = util.urlerr
46 urlreq = util.urlreq
48 urlreq = util.urlreq
47
49
48 _NARROWACL_SECTION = b'narrowacl'
50 _NARROWACL_SECTION = b'narrowacl'
49
51
50 # Maps bundle version human names to changegroup versions.
52 # Maps bundle version human names to changegroup versions.
51 _bundlespeccgversions = {
53 _bundlespeccgversions = {
52 b'v1': b'01',
54 b'v1': b'01',
53 b'v2': b'02',
55 b'v2': b'02',
54 b'packed1': b's1',
56 b'packed1': b's1',
55 b'bundle2': b'02', # legacy
57 b'bundle2': b'02', # legacy
56 }
58 }
57
59
58 # Maps bundle version with content opts to choose which part to bundle
60 # Maps bundle version with content opts to choose which part to bundle
59 _bundlespeccontentopts = {
61 _bundlespeccontentopts = {
60 b'v1': {
62 b'v1': {
61 b'changegroup': True,
63 b'changegroup': True,
62 b'cg.version': b'01',
64 b'cg.version': b'01',
63 b'obsolescence': False,
65 b'obsolescence': False,
64 b'phases': False,
66 b'phases': False,
65 b'tagsfnodescache': False,
67 b'tagsfnodescache': False,
66 b'revbranchcache': False,
68 b'revbranchcache': False,
67 },
69 },
68 b'v2': {
70 b'v2': {
69 b'changegroup': True,
71 b'changegroup': True,
70 b'cg.version': b'02',
72 b'cg.version': b'02',
71 b'obsolescence': False,
73 b'obsolescence': False,
72 b'phases': False,
74 b'phases': False,
73 b'tagsfnodescache': True,
75 b'tagsfnodescache': True,
74 b'revbranchcache': True,
76 b'revbranchcache': True,
75 },
77 },
76 b'packed1': {b'cg.version': b's1'},
78 b'packed1': {b'cg.version': b's1'},
77 }
79 }
78 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
80 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
79
81
80 _bundlespecvariants = {
82 _bundlespecvariants = {
81 b"streamv2": {
83 b"streamv2": {
82 b"changegroup": False,
84 b"changegroup": False,
83 b"streamv2": True,
85 b"streamv2": True,
84 b"tagsfnodescache": False,
86 b"tagsfnodescache": False,
85 b"revbranchcache": False,
87 b"revbranchcache": False,
86 }
88 }
87 }
89 }
88
90
89 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
91 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
90 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
92 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
91
93
92
94
93 @attr.s
95 @attr.s
94 class bundlespec(object):
96 class bundlespec(object):
95 compression = attr.ib()
97 compression = attr.ib()
96 wirecompression = attr.ib()
98 wirecompression = attr.ib()
97 version = attr.ib()
99 version = attr.ib()
98 wireversion = attr.ib()
100 wireversion = attr.ib()
99 params = attr.ib()
101 params = attr.ib()
100 contentopts = attr.ib()
102 contentopts = attr.ib()
101
103
102
104
103 def parsebundlespec(repo, spec, strict=True):
105 def parsebundlespec(repo, spec, strict=True):
104 """Parse a bundle string specification into parts.
106 """Parse a bundle string specification into parts.
105
107
106 Bundle specifications denote a well-defined bundle/exchange format.
108 Bundle specifications denote a well-defined bundle/exchange format.
107 The content of a given specification should not change over time in
109 The content of a given specification should not change over time in
108 order to ensure that bundles produced by a newer version of Mercurial are
110 order to ensure that bundles produced by a newer version of Mercurial are
109 readable from an older version.
111 readable from an older version.
110
112
111 The string currently has the form:
113 The string currently has the form:
112
114
113 <compression>-<type>[;<parameter0>[;<parameter1>]]
115 <compression>-<type>[;<parameter0>[;<parameter1>]]
114
116
115 Where <compression> is one of the supported compression formats
117 Where <compression> is one of the supported compression formats
116 and <type> is (currently) a version string. A ";" can follow the type and
118 and <type> is (currently) a version string. A ";" can follow the type and
117 all text afterwards is interpreted as URI encoded, ";" delimited key=value
119 all text afterwards is interpreted as URI encoded, ";" delimited key=value
118 pairs.
120 pairs.
119
121
120 If ``strict`` is True (the default) <compression> is required. Otherwise,
122 If ``strict`` is True (the default) <compression> is required. Otherwise,
121 it is optional.
123 it is optional.
122
124
123 Returns a bundlespec object of (compression, version, parameters).
125 Returns a bundlespec object of (compression, version, parameters).
124 Compression will be ``None`` if not in strict mode and a compression isn't
126 Compression will be ``None`` if not in strict mode and a compression isn't
125 defined.
127 defined.
126
128
127 An ``InvalidBundleSpecification`` is raised when the specification is
129 An ``InvalidBundleSpecification`` is raised when the specification is
128 not syntactically well formed.
130 not syntactically well formed.
129
131
130 An ``UnsupportedBundleSpecification`` is raised when the compression or
132 An ``UnsupportedBundleSpecification`` is raised when the compression or
131 bundle type/version is not recognized.
133 bundle type/version is not recognized.
132
134
133 Note: this function will likely eventually return a more complex data
135 Note: this function will likely eventually return a more complex data
134 structure, including bundle2 part information.
136 structure, including bundle2 part information.
135 """
137 """
136
138
137 def parseparams(s):
139 def parseparams(s):
138 if b';' not in s:
140 if b';' not in s:
139 return s, {}
141 return s, {}
140
142
141 params = {}
143 params = {}
142 version, paramstr = s.split(b';', 1)
144 version, paramstr = s.split(b';', 1)
143
145
144 for p in paramstr.split(b';'):
146 for p in paramstr.split(b';'):
145 if b'=' not in p:
147 if b'=' not in p:
146 raise error.InvalidBundleSpecification(
148 raise error.InvalidBundleSpecification(
147 _(
149 _(
148 b'invalid bundle specification: '
150 b'invalid bundle specification: '
149 b'missing "=" in parameter: %s'
151 b'missing "=" in parameter: %s'
150 )
152 )
151 % p
153 % p
152 )
154 )
153
155
154 key, value = p.split(b'=', 1)
156 key, value = p.split(b'=', 1)
155 key = urlreq.unquote(key)
157 key = urlreq.unquote(key)
156 value = urlreq.unquote(value)
158 value = urlreq.unquote(value)
157 params[key] = value
159 params[key] = value
158
160
159 return version, params
161 return version, params
160
162
161 if strict and b'-' not in spec:
163 if strict and b'-' not in spec:
162 raise error.InvalidBundleSpecification(
164 raise error.InvalidBundleSpecification(
163 _(
165 _(
164 b'invalid bundle specification; '
166 b'invalid bundle specification; '
165 b'must be prefixed with compression: %s'
167 b'must be prefixed with compression: %s'
166 )
168 )
167 % spec
169 % spec
168 )
170 )
169
171
170 if b'-' in spec:
172 if b'-' in spec:
171 compression, version = spec.split(b'-', 1)
173 compression, version = spec.split(b'-', 1)
172
174
173 if compression not in util.compengines.supportedbundlenames:
175 if compression not in util.compengines.supportedbundlenames:
174 raise error.UnsupportedBundleSpecification(
176 raise error.UnsupportedBundleSpecification(
175 _(b'%s compression is not supported') % compression
177 _(b'%s compression is not supported') % compression
176 )
178 )
177
179
178 version, params = parseparams(version)
180 version, params = parseparams(version)
179
181
180 if version not in _bundlespeccgversions:
182 if version not in _bundlespeccgversions:
181 raise error.UnsupportedBundleSpecification(
183 raise error.UnsupportedBundleSpecification(
182 _(b'%s is not a recognized bundle version') % version
184 _(b'%s is not a recognized bundle version') % version
183 )
185 )
184 else:
186 else:
185 # Value could be just the compression or just the version, in which
187 # Value could be just the compression or just the version, in which
186 # case some defaults are assumed (but only when not in strict mode).
188 # case some defaults are assumed (but only when not in strict mode).
187 assert not strict
189 assert not strict
188
190
189 spec, params = parseparams(spec)
191 spec, params = parseparams(spec)
190
192
191 if spec in util.compengines.supportedbundlenames:
193 if spec in util.compengines.supportedbundlenames:
192 compression = spec
194 compression = spec
193 version = b'v1'
195 version = b'v1'
194 # Generaldelta repos require v2.
196 # Generaldelta repos require v2.
195 if b'generaldelta' in repo.requirements:
197 if b'generaldelta' in repo.requirements:
196 version = b'v2'
198 version = b'v2'
197 # Modern compression engines require v2.
199 # Modern compression engines require v2.
198 if compression not in _bundlespecv1compengines:
200 if compression not in _bundlespecv1compengines:
199 version = b'v2'
201 version = b'v2'
200 elif spec in _bundlespeccgversions:
202 elif spec in _bundlespeccgversions:
201 if spec == b'packed1':
203 if spec == b'packed1':
202 compression = b'none'
204 compression = b'none'
203 else:
205 else:
204 compression = b'bzip2'
206 compression = b'bzip2'
205 version = spec
207 version = spec
206 else:
208 else:
207 raise error.UnsupportedBundleSpecification(
209 raise error.UnsupportedBundleSpecification(
208 _(b'%s is not a recognized bundle specification') % spec
210 _(b'%s is not a recognized bundle specification') % spec
209 )
211 )
210
212
211 # Bundle version 1 only supports a known set of compression engines.
213 # Bundle version 1 only supports a known set of compression engines.
212 if version == b'v1' and compression not in _bundlespecv1compengines:
214 if version == b'v1' and compression not in _bundlespecv1compengines:
213 raise error.UnsupportedBundleSpecification(
215 raise error.UnsupportedBundleSpecification(
214 _(b'compression engine %s is not supported on v1 bundles')
216 _(b'compression engine %s is not supported on v1 bundles')
215 % compression
217 % compression
216 )
218 )
217
219
218 # The specification for packed1 can optionally declare the data formats
220 # The specification for packed1 can optionally declare the data formats
219 # required to apply it. If we see this metadata, compare against what the
221 # required to apply it. If we see this metadata, compare against what the
220 # repo supports and error if the bundle isn't compatible.
222 # repo supports and error if the bundle isn't compatible.
221 if version == b'packed1' and b'requirements' in params:
223 if version == b'packed1' and b'requirements' in params:
222 requirements = set(params[b'requirements'].split(b','))
224 requirements = set(params[b'requirements'].split(b','))
223 missingreqs = requirements - repo.supportedformats
225 missingreqs = requirements - repo.supportedformats
224 if missingreqs:
226 if missingreqs:
225 raise error.UnsupportedBundleSpecification(
227 raise error.UnsupportedBundleSpecification(
226 _(b'missing support for repository features: %s')
228 _(b'missing support for repository features: %s')
227 % b', '.join(sorted(missingreqs))
229 % b', '.join(sorted(missingreqs))
228 )
230 )
229
231
230 # Compute contentopts based on the version
232 # Compute contentopts based on the version
231 contentopts = _bundlespeccontentopts.get(version, {}).copy()
233 contentopts = _bundlespeccontentopts.get(version, {}).copy()
232
234
233 # Process the variants
235 # Process the variants
234 if b"stream" in params and params[b"stream"] == b"v2":
236 if b"stream" in params and params[b"stream"] == b"v2":
235 variant = _bundlespecvariants[b"streamv2"]
237 variant = _bundlespecvariants[b"streamv2"]
236 contentopts.update(variant)
238 contentopts.update(variant)
237
239
238 engine = util.compengines.forbundlename(compression)
240 engine = util.compengines.forbundlename(compression)
239 compression, wirecompression = engine.bundletype()
241 compression, wirecompression = engine.bundletype()
240 wireversion = _bundlespeccgversions[version]
242 wireversion = _bundlespeccgversions[version]
241
243
242 return bundlespec(
244 return bundlespec(
243 compression, wirecompression, version, wireversion, params, contentopts
245 compression, wirecompression, version, wireversion, params, contentopts
244 )
246 )
245
247
246
248
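# Editor's sketch (not part of Mercurial): a standalone illustration of how a
# bundlespec string such as b'zstd-v2;stream=v2' breaks down into compression,
# version, and parameters, mirroring parsebundlespec()/parseparams() above
# (URL-unquoting of keys and values is omitted here).
def _demo_split_bundlespec(spec):
    compression, rest = spec.split(b'-', 1)
    if b';' in rest:
        version, paramstr = rest.split(b';', 1)
        params = dict(p.split(b'=', 1) for p in paramstr.split(b';'))
    else:
        version, params = rest, {}
    return compression, version, params

# _demo_split_bundlespec(b'zstd-v2;stream=v2')
# -> (b'zstd', b'v2', {b'stream': b'v2'})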
247 def readbundle(ui, fh, fname, vfs=None):
249 def readbundle(ui, fh, fname, vfs=None):
248 header = changegroup.readexactly(fh, 4)
250 header = changegroup.readexactly(fh, 4)
249
251
250 alg = None
252 alg = None
251 if not fname:
253 if not fname:
252 fname = b"stream"
254 fname = b"stream"
253 if not header.startswith(b'HG') and header.startswith(b'\0'):
255 if not header.startswith(b'HG') and header.startswith(b'\0'):
254 fh = changegroup.headerlessfixup(fh, header)
256 fh = changegroup.headerlessfixup(fh, header)
255 header = b"HG10"
257 header = b"HG10"
256 alg = b'UN'
258 alg = b'UN'
257 elif vfs:
259 elif vfs:
258 fname = vfs.join(fname)
260 fname = vfs.join(fname)
259
261
260 magic, version = header[0:2], header[2:4]
262 magic, version = header[0:2], header[2:4]
261
263
262 if magic != b'HG':
264 if magic != b'HG':
263 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
265 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
264 if version == b'10':
266 if version == b'10':
265 if alg is None:
267 if alg is None:
266 alg = changegroup.readexactly(fh, 2)
268 alg = changegroup.readexactly(fh, 2)
267 return changegroup.cg1unpacker(fh, alg)
269 return changegroup.cg1unpacker(fh, alg)
268 elif version.startswith(b'2'):
270 elif version.startswith(b'2'):
269 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
271 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
270 elif version == b'S1':
272 elif version == b'S1':
271 return streamclone.streamcloneapplier(fh)
273 return streamclone.streamcloneapplier(fh)
272 else:
274 else:
273 raise error.Abort(
275 raise error.Abort(
274 _(b'%s: unknown bundle version %s') % (fname, version)
276 _(b'%s: unknown bundle version %s') % (fname, version)
275 )
277 )
276
278
277
279
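# Editor's sketch (not part of Mercurial): how readbundle() above interprets
# the first four header bytes of a bundle. Only the magic/version split is
# shown; the dispatch to the matching unbundler is summarised in the comment.
def _demo_parse_bundle_header(header):
    magic, version = header[0:2], header[2:4]
    if magic != b'HG':
        raise ValueError('not a Mercurial bundle')
    # b'10' -> cg1unpacker, b'2x' -> bundle2, b'S1' -> stream clone applier
    return magic, version

# _demo_parse_bundle_header(b'HG20')  -> (b'HG', b'20')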
278 def getbundlespec(ui, fh):
280 def getbundlespec(ui, fh):
279 """Infer the bundlespec from a bundle file handle.
281 """Infer the bundlespec from a bundle file handle.
280
282
281 The input file handle is seeked and the original seek position is not
283 The input file handle is seeked and the original seek position is not
282 restored.
284 restored.
283 """
285 """
284
286
285 def speccompression(alg):
287 def speccompression(alg):
286 try:
288 try:
287 return util.compengines.forbundletype(alg).bundletype()[0]
289 return util.compengines.forbundletype(alg).bundletype()[0]
288 except KeyError:
290 except KeyError:
289 return None
291 return None
290
292
291 b = readbundle(ui, fh, None)
293 b = readbundle(ui, fh, None)
292 if isinstance(b, changegroup.cg1unpacker):
294 if isinstance(b, changegroup.cg1unpacker):
293 alg = b._type
295 alg = b._type
294 if alg == b'_truncatedBZ':
296 if alg == b'_truncatedBZ':
295 alg = b'BZ'
297 alg = b'BZ'
296 comp = speccompression(alg)
298 comp = speccompression(alg)
297 if not comp:
299 if not comp:
298 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
300 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
299 return b'%s-v1' % comp
301 return b'%s-v1' % comp
300 elif isinstance(b, bundle2.unbundle20):
302 elif isinstance(b, bundle2.unbundle20):
301 if b'Compression' in b.params:
303 if b'Compression' in b.params:
302 comp = speccompression(b.params[b'Compression'])
304 comp = speccompression(b.params[b'Compression'])
303 if not comp:
305 if not comp:
304 raise error.Abort(
306 raise error.Abort(
305 _(b'unknown compression algorithm: %s') % comp
307 _(b'unknown compression algorithm: %s') % comp
306 )
308 )
307 else:
309 else:
308 comp = b'none'
310 comp = b'none'
309
311
310 version = None
312 version = None
311 for part in b.iterparts():
313 for part in b.iterparts():
312 if part.type == b'changegroup':
314 if part.type == b'changegroup':
313 version = part.params[b'version']
315 version = part.params[b'version']
314 if version in (b'01', b'02'):
316 if version in (b'01', b'02'):
315 version = b'v2'
317 version = b'v2'
316 else:
318 else:
317 raise error.Abort(
319 raise error.Abort(
318 _(
320 _(
319 b'changegroup version %s does not have '
321 b'changegroup version %s does not have '
320 b'a known bundlespec'
322 b'a known bundlespec'
321 )
323 )
322 % version,
324 % version,
323 hint=_(b'try upgrading your Mercurial client'),
325 hint=_(b'try upgrading your Mercurial client'),
324 )
326 )
325 elif part.type == b'stream2' and version is None:
327 elif part.type == b'stream2' and version is None:
326 # A stream2 part must be part of a v2 bundle
328 # A stream2 part must be part of a v2 bundle
327 requirements = urlreq.unquote(part.params[b'requirements'])
329 requirements = urlreq.unquote(part.params[b'requirements'])
328 splitted = requirements.split()
330 splitted = requirements.split()
329 params = bundle2._formatrequirementsparams(splitted)
331 params = bundle2._formatrequirementsparams(splitted)
330 return b'none-v2;stream=v2;%s' % params
332 return b'none-v2;stream=v2;%s' % params
331
333
332 if not version:
334 if not version:
333 raise error.Abort(
335 raise error.Abort(
334 _(b'could not identify changegroup version in bundle')
336 _(b'could not identify changegroup version in bundle')
335 )
337 )
336
338
337 return b'%s-%s' % (comp, version)
339 return b'%s-%s' % (comp, version)
338 elif isinstance(b, streamclone.streamcloneapplier):
340 elif isinstance(b, streamclone.streamcloneapplier):
339 requirements = streamclone.readbundle1header(fh)[2]
341 requirements = streamclone.readbundle1header(fh)[2]
340 formatted = bundle2._formatrequirementsparams(requirements)
342 formatted = bundle2._formatrequirementsparams(requirements)
341 return b'none-packed1;%s' % formatted
343 return b'none-packed1;%s' % formatted
342 else:
344 else:
343 raise error.Abort(_(b'unknown bundle type: %s') % b)
345 raise error.Abort(_(b'unknown bundle type: %s') % b)
344
346
345
347
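# Editor's sketch (not part of Mercurial): the general shape of the strings
# getbundlespec() above returns. The parameter portion of stream specs is
# produced by bundle2._formatrequirementsparams() and is only hinted at here.
_comp, _version = b'zstd', b'v2'
_plainspec = b'%s-%s' % (_comp, _version)            # e.g. b'zstd-v2'
_streamspec = b'none-v2;stream=v2;%s' % b'requirements=...'
_packedspec = b'none-packed1;%s' % b'requirements=...'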
346 def _computeoutgoing(repo, heads, common):
348 def _computeoutgoing(repo, heads, common):
347 """Computes which revs are outgoing given a set of common
349 """Computes which revs are outgoing given a set of common
348 and a set of heads.
350 and a set of heads.
349
351
350 This is a separate function so extensions can have access to
352 This is a separate function so extensions can have access to
351 the logic.
353 the logic.
352
354
353 Returns a discovery.outgoing object.
355 Returns a discovery.outgoing object.
354 """
356 """
355 cl = repo.changelog
357 cl = repo.changelog
356 if common:
358 if common:
357 hasnode = cl.hasnode
359 hasnode = cl.hasnode
358 common = [n for n in common if hasnode(n)]
360 common = [n for n in common if hasnode(n)]
359 else:
361 else:
360 common = [nullid]
362 common = [nullid]
361 if not heads:
363 if not heads:
362 heads = cl.heads()
364 heads = cl.heads()
363 return discovery.outgoing(repo, common, heads)
365 return discovery.outgoing(repo, common, heads)
364
366
365
367
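# Editor's sketch (not part of Mercurial): the idea behind "outgoing" revs in
# _computeoutgoing() above, reduced to plain sets. Roughly, outgoing is
# "ancestors of the requested heads that are not ancestors of the common set".
def _demo_outgoing(ancestors_of_heads, ancestors_of_common):
    return ancestors_of_heads - ancestors_of_common

# _demo_outgoing({1, 2, 3, 4}, {1, 2})  -> {3, 4}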
366 def _checkpublish(pushop):
368 def _checkpublish(pushop):
367 repo = pushop.repo
369 repo = pushop.repo
368 ui = repo.ui
370 ui = repo.ui
369 behavior = ui.config(b'experimental', b'auto-publish')
371 behavior = ui.config(b'experimental', b'auto-publish')
370 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
372 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
371 return
373 return
372 remotephases = listkeys(pushop.remote, b'phases')
374 remotephases = listkeys(pushop.remote, b'phases')
373 if not remotephases.get(b'publishing', False):
375 if not remotephases.get(b'publishing', False):
374 return
376 return
375
377
376 if pushop.revs is None:
378 if pushop.revs is None:
377 published = repo.filtered(b'served').revs(b'not public()')
379 published = repo.filtered(b'served').revs(b'not public()')
378 else:
380 else:
379 published = repo.revs(b'::%ln - public()', pushop.revs)
381 published = repo.revs(b'::%ln - public()', pushop.revs)
380 if published:
382 if published:
381 if behavior == b'warn':
383 if behavior == b'warn':
382 ui.warn(
384 ui.warn(
383 _(b'%i changesets about to be published\n') % len(published)
385 _(b'%i changesets about to be published\n') % len(published)
384 )
386 )
385 elif behavior == b'confirm':
387 elif behavior == b'confirm':
386 if ui.promptchoice(
388 if ui.promptchoice(
387 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
389 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
388 % len(published)
390 % len(published)
389 ):
391 ):
390 raise error.Abort(_(b'user quit'))
392 raise error.Abort(_(b'user quit'))
391 elif behavior == b'abort':
393 elif behavior == b'abort':
392 msg = _(b'push would publish %i changesets') % len(published)
394 msg = _(b'push would publish %i changesets') % len(published)
393 hint = _(
395 hint = _(
394 b"use --publish or adjust 'experimental.auto-publish'"
396 b"use --publish or adjust 'experimental.auto-publish'"
395 b" config"
397 b" config"
396 )
398 )
397 raise error.Abort(msg, hint=hint)
399 raise error.Abort(msg, hint=hint)
398
400
399
401
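# Editor's note: _checkpublish() above is driven by the 'experimental.auto-publish'
# configuration; for example, an hgrc of the form
#
#   [experimental]
#   auto-publish = abort
#
# makes a push abort instead of silently publishing draft changesets, while
# 'warn' and 'confirm' print a message or prompt as implemented above.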
400 def _forcebundle1(op):
402 def _forcebundle1(op):
401 """return true if a pull/push must use bundle1
403 """return true if a pull/push must use bundle1
402
404
403 This function is used to allow testing of the older bundle version"""
405 This function is used to allow testing of the older bundle version"""
404 ui = op.repo.ui
406 ui = op.repo.ui
405 # The goal of this config is to allow developers to choose the bundle
407 # The goal of this config is to allow developers to choose the bundle
406 # version used during exchange. This is especially handy during tests.
408 # version used during exchange. This is especially handy during tests.
407 # Value is a list of bundle versions to pick from; the highest version
409 # Value is a list of bundle versions to pick from; the highest version
408 # should be used.
410 # should be used.
409 #
411 #
410 # developer config: devel.legacy.exchange
412 # developer config: devel.legacy.exchange
411 exchange = ui.configlist(b'devel', b'legacy.exchange')
413 exchange = ui.configlist(b'devel', b'legacy.exchange')
412 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
414 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
413 return forcebundle1 or not op.remote.capable(b'bundle2')
415 return forcebundle1 or not op.remote.capable(b'bundle2')
414
416
415
417
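# Editor's note: _forcebundle1() above reads the developer-only setting
# 'devel.legacy.exchange'; for example
#
#   [devel]
#   legacy.exchange = bundle1
#
# forces the bundle1 exchange path (as long as 'bundle2' is not also listed),
# which is mainly useful in tests.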
416 class pushoperation(object):
418 class pushoperation(object):
417 """A object that represent a single push operation
419 """A object that represent a single push operation
418
420
419 Its purpose is to carry push related state and very common operations.
421 Its purpose is to carry push related state and very common operations.
420
422
421 A new pushoperation should be created at the beginning of each push and
423 A new pushoperation should be created at the beginning of each push and
422 discarded afterward.
424 discarded afterward.
423 """
425 """
424
426
425 def __init__(
427 def __init__(
426 self,
428 self,
427 repo,
429 repo,
428 remote,
430 remote,
429 force=False,
431 force=False,
430 revs=None,
432 revs=None,
431 newbranch=False,
433 newbranch=False,
432 bookmarks=(),
434 bookmarks=(),
433 publish=False,
435 publish=False,
434 pushvars=None,
436 pushvars=None,
435 ):
437 ):
436 # repo we push from
438 # repo we push from
437 self.repo = repo
439 self.repo = repo
438 self.ui = repo.ui
440 self.ui = repo.ui
439 # repo we push to
441 # repo we push to
440 self.remote = remote
442 self.remote = remote
441 # force option provided
443 # force option provided
442 self.force = force
444 self.force = force
443 # revs to be pushed (None is "all")
445 # revs to be pushed (None is "all")
444 self.revs = revs
446 self.revs = revs
445 # bookmark explicitly pushed
447 # bookmark explicitly pushed
446 self.bookmarks = bookmarks
448 self.bookmarks = bookmarks
447 # allow push of new branch
449 # allow push of new branch
448 self.newbranch = newbranch
450 self.newbranch = newbranch
449 # steps already performed
451 # steps already performed
450 # (used to check what steps have already been performed through bundle2)
452 # (used to check what steps have already been performed through bundle2)
451 self.stepsdone = set()
453 self.stepsdone = set()
452 # Integer version of the changegroup push result
454 # Integer version of the changegroup push result
453 # - None means nothing to push
455 # - None means nothing to push
454 # - 0 means HTTP error
456 # - 0 means HTTP error
455 # - 1 means we pushed and remote head count is unchanged *or*
457 # - 1 means we pushed and remote head count is unchanged *or*
456 # we have outgoing changesets but refused to push
458 # we have outgoing changesets but refused to push
457 # - other values as described by addchangegroup()
459 # - other values as described by addchangegroup()
458 self.cgresult = None
460 self.cgresult = None
459 # Boolean value for the bookmark push
461 # Boolean value for the bookmark push
460 self.bkresult = None
462 self.bkresult = None
461 # discovery.outgoing object (contains common and outgoing data)
463 # discovery.outgoing object (contains common and outgoing data)
462 self.outgoing = None
464 self.outgoing = None
463 # all remote topological heads before the push
465 # all remote topological heads before the push
464 self.remoteheads = None
466 self.remoteheads = None
465 # Details of the remote branch pre and post push
467 # Details of the remote branch pre and post push
466 #
468 #
467 # mapping: {'branch': ([remoteheads],
469 # mapping: {'branch': ([remoteheads],
468 # [newheads],
470 # [newheads],
469 # [unsyncedheads],
471 # [unsyncedheads],
470 # [discardedheads])}
472 # [discardedheads])}
471 # - branch: the branch name
473 # - branch: the branch name
472 # - remoteheads: the list of remote heads known locally
474 # - remoteheads: the list of remote heads known locally
473 # None if the branch is new
475 # None if the branch is new
474 # - newheads: the new remote heads (known locally) with outgoing pushed
476 # - newheads: the new remote heads (known locally) with outgoing pushed
475 # - unsyncedheads: the list of remote heads unknown locally.
477 # - unsyncedheads: the list of remote heads unknown locally.
476 # - discardedheads: the list of remote heads made obsolete by the push
478 # - discardedheads: the list of remote heads made obsolete by the push
477 self.pushbranchmap = None
479 self.pushbranchmap = None
478 # testable as a boolean indicating if any nodes are missing locally.
480 # testable as a boolean indicating if any nodes are missing locally.
479 self.incoming = None
481 self.incoming = None
480 # summary of the remote phase situation
482 # summary of the remote phase situation
481 self.remotephases = None
483 self.remotephases = None
482 # phase changes that must be pushed alongside the changesets
484 # phase changes that must be pushed alongside the changesets
483 self.outdatedphases = None
485 self.outdatedphases = None
484 # phase changes that must be pushed if the changeset push fails
486 # phase changes that must be pushed if the changeset push fails
485 self.fallbackoutdatedphases = None
487 self.fallbackoutdatedphases = None
486 # outgoing obsmarkers
488 # outgoing obsmarkers
487 self.outobsmarkers = set()
489 self.outobsmarkers = set()
488 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
490 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
489 self.outbookmarks = []
491 self.outbookmarks = []
490 # transaction manager
492 # transaction manager
491 self.trmanager = None
493 self.trmanager = None
492 # map { pushkey partid -> callback handling failure}
494 # map { pushkey partid -> callback handling failure}
493 # used to handle exception from mandatory pushkey part failure
495 # used to handle exception from mandatory pushkey part failure
494 self.pkfailcb = {}
496 self.pkfailcb = {}
495 # an iterable of pushvars or None
497 # an iterable of pushvars or None
496 self.pushvars = pushvars
498 self.pushvars = pushvars
497 # publish pushed changesets
499 # publish pushed changesets
498 self.publish = publish
500 self.publish = publish
499
501
500 @util.propertycache
502 @util.propertycache
501 def futureheads(self):
503 def futureheads(self):
502 """future remote heads if the changeset push succeeds"""
504 """future remote heads if the changeset push succeeds"""
503 return self.outgoing.missingheads
505 return self.outgoing.missingheads
504
506
505 @util.propertycache
507 @util.propertycache
506 def fallbackheads(self):
508 def fallbackheads(self):
507 """future remote heads if the changeset push fails"""
509 """future remote heads if the changeset push fails"""
508 if self.revs is None:
510 if self.revs is None:
509 # no targets to push, all common heads are relevant
511 # no targets to push, all common heads are relevant
510 return self.outgoing.commonheads
512 return self.outgoing.commonheads
511 unfi = self.repo.unfiltered()
513 unfi = self.repo.unfiltered()
512 # I want cheads = heads(::missingheads and ::commonheads)
514 # I want cheads = heads(::missingheads and ::commonheads)
513 # (missingheads is revs with secret changeset filtered out)
515 # (missingheads is revs with secret changeset filtered out)
514 #
516 #
515 # This can be expressed as:
517 # This can be expressed as:
516 # cheads = ( (missingheads and ::commonheads)
518 # cheads = ( (missingheads and ::commonheads)
517 # + (commonheads and ::missingheads))"
519 # + (commonheads and ::missingheads))"
518 # )
520 # )
519 #
521 #
520 # while trying to push we already computed the following:
522 # while trying to push we already computed the following:
521 # common = (::commonheads)
523 # common = (::commonheads)
522 # missing = ((commonheads::missingheads) - commonheads)
524 # missing = ((commonheads::missingheads) - commonheads)
523 #
525 #
524 # We can pick:
526 # We can pick:
525 # * missingheads part of common (::commonheads)
527 # * missingheads part of common (::commonheads)
526 common = self.outgoing.common
528 common = self.outgoing.common
527 rev = self.repo.changelog.index.rev
529 rev = self.repo.changelog.index.rev
528 cheads = [node for node in self.revs if rev(node) in common]
530 cheads = [node for node in self.revs if rev(node) in common]
529 # and
531 # and
530 # * commonheads parents on missing
532 # * commonheads parents on missing
531 revset = unfi.set(
533 revset = unfi.set(
532 b'%ln and parents(roots(%ln))',
534 b'%ln and parents(roots(%ln))',
533 self.outgoing.commonheads,
535 self.outgoing.commonheads,
534 self.outgoing.missing,
536 self.outgoing.missing,
535 )
537 )
536 cheads.extend(c.node() for c in revset)
538 cheads.extend(c.node() for c in revset)
537 return cheads
539 return cheads
538
540
539 @property
541 @property
540 def commonheads(self):
542 def commonheads(self):
541 """set of all common heads after changeset bundle push"""
543 """set of all common heads after changeset bundle push"""
542 if self.cgresult:
544 if self.cgresult:
543 return self.futureheads
545 return self.futureheads
544 else:
546 else:
545 return self.fallbackheads
547 return self.fallbackheads
546
548
547
549
548 # mapping of message used when pushing bookmark
550 # mapping of message used when pushing bookmark
549 bookmsgmap = {
551 bookmsgmap = {
550 b'update': (
552 b'update': (
551 _(b"updating bookmark %s\n"),
553 _(b"updating bookmark %s\n"),
552 _(b'updating bookmark %s failed!\n'),
554 _(b'updating bookmark %s failed!\n'),
553 ),
555 ),
554 b'export': (
556 b'export': (
555 _(b"exporting bookmark %s\n"),
557 _(b"exporting bookmark %s\n"),
556 _(b'exporting bookmark %s failed!\n'),
558 _(b'exporting bookmark %s failed!\n'),
557 ),
559 ),
558 b'delete': (
560 b'delete': (
559 _(b"deleting remote bookmark %s\n"),
561 _(b"deleting remote bookmark %s\n"),
560 _(b'deleting remote bookmark %s failed!\n'),
562 _(b'deleting remote bookmark %s failed!\n'),
561 ),
563 ),
562 }
564 }
563
565
564
566
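# Editor's sketch (not part of Mercurial): each bookmsgmap entry pairs a
# success and a failure message template for one bookmark push action.
_okmsg, _failmsg = bookmsgmap[b'update']
# e.g. ui.status(_okmsg % bookmarkname) on success and
#      ui.warn(_failmsg % bookmarkname) on failure (usage assumed here).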
565 def push(
567 def push(
566 repo,
568 repo,
567 remote,
569 remote,
568 force=False,
570 force=False,
569 revs=None,
571 revs=None,
570 newbranch=False,
572 newbranch=False,
571 bookmarks=(),
573 bookmarks=(),
572 publish=False,
574 publish=False,
573 opargs=None,
575 opargs=None,
574 ):
576 ):
575 '''Push outgoing changesets (limited by revs) from a local
577 '''Push outgoing changesets (limited by revs) from a local
576 repository to remote. Return an integer:
578 repository to remote. Return an integer:
577 - None means nothing to push
579 - None means nothing to push
578 - 0 means HTTP error
580 - 0 means HTTP error
579 - 1 means we pushed and remote head count is unchanged *or*
581 - 1 means we pushed and remote head count is unchanged *or*
580 we have outgoing changesets but refused to push
582 we have outgoing changesets but refused to push
581 - other values as described by addchangegroup()
583 - other values as described by addchangegroup()
582 '''
584 '''
583 if opargs is None:
585 if opargs is None:
584 opargs = {}
586 opargs = {}
585 pushop = pushoperation(
587 pushop = pushoperation(
586 repo,
588 repo,
587 remote,
589 remote,
588 force,
590 force,
589 revs,
591 revs,
590 newbranch,
592 newbranch,
591 bookmarks,
593 bookmarks,
592 publish,
594 publish,
593 **pycompat.strkwargs(opargs)
595 **pycompat.strkwargs(opargs)
594 )
596 )
595 if pushop.remote.local():
597 if pushop.remote.local():
596 missing = (
598 missing = (
597 set(pushop.repo.requirements) - pushop.remote.local().supported
599 set(pushop.repo.requirements) - pushop.remote.local().supported
598 )
600 )
599 if missing:
601 if missing:
600 msg = _(
602 msg = _(
601 b"required features are not"
603 b"required features are not"
602 b" supported in the destination:"
604 b" supported in the destination:"
603 b" %s"
605 b" %s"
604 ) % (b', '.join(sorted(missing)))
606 ) % (b', '.join(sorted(missing)))
605 raise error.Abort(msg)
607 raise error.Abort(msg)
606
608
607 if not pushop.remote.canpush():
609 if not pushop.remote.canpush():
608 raise error.Abort(_(b"destination does not support push"))
610 raise error.Abort(_(b"destination does not support push"))
609
611
610 if not pushop.remote.capable(b'unbundle'):
612 if not pushop.remote.capable(b'unbundle'):
611 raise error.Abort(
613 raise error.Abort(
612 _(
614 _(
613 b'cannot push: destination does not support the '
615 b'cannot push: destination does not support the '
614 b'unbundle wire protocol command'
616 b'unbundle wire protocol command'
615 )
617 )
616 )
618 )
617
619
618 # get lock as we might write phase data
620 # get lock as we might write phase data
619 wlock = lock = None
621 wlock = lock = None
620 try:
622 try:
621 # bundle2 push may receive a reply bundle touching bookmarks
623 # bundle2 push may receive a reply bundle touching bookmarks
622 # requiring the wlock. Take it now to ensure proper ordering.
624 # requiring the wlock. Take it now to ensure proper ordering.
623 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
625 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
624 if (
626 if (
625 (not _forcebundle1(pushop))
627 (not _forcebundle1(pushop))
626 and maypushback
628 and maypushback
627 and not bookmod.bookmarksinstore(repo)
629 and not bookmod.bookmarksinstore(repo)
628 ):
630 ):
629 wlock = pushop.repo.wlock()
631 wlock = pushop.repo.wlock()
630 lock = pushop.repo.lock()
632 lock = pushop.repo.lock()
631 pushop.trmanager = transactionmanager(
633 pushop.trmanager = transactionmanager(
632 pushop.repo, b'push-response', pushop.remote.url()
634 pushop.repo, b'push-response', pushop.remote.url()
633 )
635 )
634 except error.LockUnavailable as err:
636 except error.LockUnavailable as err:
635 # source repo cannot be locked.
637 # source repo cannot be locked.
636 # We do not abort the push, but just disable the local phase
638 # We do not abort the push, but just disable the local phase
637 # synchronisation.
639 # synchronisation.
638 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
640 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
639 err
641 err
640 )
642 )
641 pushop.ui.debug(msg)
643 pushop.ui.debug(msg)
642
644
643 with wlock or util.nullcontextmanager():
645 with wlock or util.nullcontextmanager():
644 with lock or util.nullcontextmanager():
646 with lock or util.nullcontextmanager():
645 with pushop.trmanager or util.nullcontextmanager():
647 with pushop.trmanager or util.nullcontextmanager():
646 pushop.repo.checkpush(pushop)
648 pushop.repo.checkpush(pushop)
647 _checkpublish(pushop)
649 _checkpublish(pushop)
648 _pushdiscovery(pushop)
650 _pushdiscovery(pushop)
649 if not pushop.force:
651 if not pushop.force:
650 _checksubrepostate(pushop)
652 _checksubrepostate(pushop)
651 if not _forcebundle1(pushop):
653 if not _forcebundle1(pushop):
652 _pushbundle2(pushop)
654 _pushbundle2(pushop)
653 _pushchangeset(pushop)
655 _pushchangeset(pushop)
654 _pushsyncphase(pushop)
656 _pushsyncphase(pushop)
655 _pushobsolete(pushop)
657 _pushobsolete(pushop)
656 _pushbookmark(pushop)
658 _pushbookmark(pushop)
657
659
658 if repo.ui.configbool(b'experimental', b'remotenames'):
660 if repo.ui.configbool(b'experimental', b'remotenames'):
659 logexchange.pullremotenames(repo, remote)
661 logexchange.pullremotenames(repo, remote)
660
662
661 return pushop
663 return pushop
662
664
663
665
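# Editor's sketch (not part of Mercurial): interpreting the integer result
# documented in push()'s docstring above.
def _demo_describe_push_result(cgresult):
    if cgresult is None:
        return 'nothing to push'
    if cgresult == 0:
        return 'HTTP error'
    if cgresult == 1:
        return 'pushed; remote head count unchanged (or push was refused)'
    return 'other value, as described by addchangegroup()'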
664 # list of steps to perform discovery before push
666 # list of steps to perform discovery before push
665 pushdiscoveryorder = []
667 pushdiscoveryorder = []
666
668
667 # Mapping between step name and function
669 # Mapping between step name and function
668 #
670 #
669 # This exists to help extensions wrap steps if necessary
671 # This exists to help extensions wrap steps if necessary
670 pushdiscoverymapping = {}
672 pushdiscoverymapping = {}
671
673
672
674
673 def pushdiscovery(stepname):
675 def pushdiscovery(stepname):
674 """decorator for function performing discovery before push
676 """decorator for function performing discovery before push
675
677
676 The function is added to the step -> function mapping and appended to the
678 The function is added to the step -> function mapping and appended to the
677 list of steps. Beware that decorated functions will be added in order (this
679 list of steps. Beware that decorated functions will be added in order (this
678 may matter).
680 may matter).
679
681
680 You can only use this decorator for a new step; if you want to wrap a step
682 You can only use this decorator for a new step; if you want to wrap a step
681 from an extension, change the pushdiscoverymapping dictionary directly."""
683 from an extension, change the pushdiscoverymapping dictionary directly."""
682
684
683 def dec(func):
685 def dec(func):
684 assert stepname not in pushdiscoverymapping
686 assert stepname not in pushdiscoverymapping
685 pushdiscoverymapping[stepname] = func
687 pushdiscoverymapping[stepname] = func
686 pushdiscoveryorder.append(stepname)
688 pushdiscoveryorder.append(stepname)
687 return func
689 return func
688
690
689 return dec
691 return dec
690
692
691
693
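# Editor's sketch (not part of Mercurial): how an extension could register an
# extra discovery step with the decorator above. The step name and body are
# hypothetical; an extension would normally do this from its own module.
@pushdiscovery(b'example-extension-step')
def _pushdiscoveryexample(pushop):
    pushop.ui.debug(b'example discovery step ran\n')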
692 def _pushdiscovery(pushop):
694 def _pushdiscovery(pushop):
693 """Run all discovery steps"""
695 """Run all discovery steps"""
694 for stepname in pushdiscoveryorder:
696 for stepname in pushdiscoveryorder:
695 step = pushdiscoverymapping[stepname]
697 step = pushdiscoverymapping[stepname]
696 step(pushop)
698 step(pushop)
697
699
698
700
699 def _checksubrepostate(pushop):
701 def _checksubrepostate(pushop):
700 """Ensure all outgoing referenced subrepo revisions are present locally"""
702 """Ensure all outgoing referenced subrepo revisions are present locally"""
701 for n in pushop.outgoing.missing:
703 for n in pushop.outgoing.missing:
702 ctx = pushop.repo[n]
704 ctx = pushop.repo[n]
703
705
704 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
706 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
705 for subpath in sorted(ctx.substate):
707 for subpath in sorted(ctx.substate):
706 sub = ctx.sub(subpath)
708 sub = ctx.sub(subpath)
707 sub.verify(onpush=True)
709 sub.verify(onpush=True)
708
710
709
711
710 @pushdiscovery(b'changeset')
712 @pushdiscovery(b'changeset')
711 def _pushdiscoverychangeset(pushop):
713 def _pushdiscoverychangeset(pushop):
712 """discover the changeset that need to be pushed"""
714 """discover the changeset that need to be pushed"""
713 fci = discovery.findcommonincoming
715 fci = discovery.findcommonincoming
714 if pushop.revs:
716 if pushop.revs:
715 commoninc = fci(
717 commoninc = fci(
716 pushop.repo,
718 pushop.repo,
717 pushop.remote,
719 pushop.remote,
718 force=pushop.force,
720 force=pushop.force,
719 ancestorsof=pushop.revs,
721 ancestorsof=pushop.revs,
720 )
722 )
721 else:
723 else:
722 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
724 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
723 common, inc, remoteheads = commoninc
725 common, inc, remoteheads = commoninc
724 fco = discovery.findcommonoutgoing
726 fco = discovery.findcommonoutgoing
725 outgoing = fco(
727 outgoing = fco(
726 pushop.repo,
728 pushop.repo,
727 pushop.remote,
729 pushop.remote,
728 onlyheads=pushop.revs,
730 onlyheads=pushop.revs,
729 commoninc=commoninc,
731 commoninc=commoninc,
730 force=pushop.force,
732 force=pushop.force,
731 )
733 )
732 pushop.outgoing = outgoing
734 pushop.outgoing = outgoing
733 pushop.remoteheads = remoteheads
735 pushop.remoteheads = remoteheads
734 pushop.incoming = inc
736 pushop.incoming = inc
735
737
736
738
737 @pushdiscovery(b'phase')
739 @pushdiscovery(b'phase')
738 def _pushdiscoveryphase(pushop):
740 def _pushdiscoveryphase(pushop):
739 """discover the phase that needs to be pushed
741 """discover the phase that needs to be pushed
740
742
741 (computed for both success and failure case for changesets push)"""
743 (computed for both success and failure case for changesets push)"""
742 outgoing = pushop.outgoing
744 outgoing = pushop.outgoing
743 unfi = pushop.repo.unfiltered()
745 unfi = pushop.repo.unfiltered()
744 remotephases = listkeys(pushop.remote, b'phases')
746 remotephases = listkeys(pushop.remote, b'phases')
745
747
746 if (
748 if (
747 pushop.ui.configbool(b'ui', b'_usedassubrepo')
749 pushop.ui.configbool(b'ui', b'_usedassubrepo')
748 and remotephases # server supports phases
750 and remotephases # server supports phases
749 and not pushop.outgoing.missing # no changesets to be pushed
751 and not pushop.outgoing.missing # no changesets to be pushed
750 and remotephases.get(b'publishing', False)
752 and remotephases.get(b'publishing', False)
751 ):
753 ):
752 # When:
754 # When:
753 # - this is a subrepo push
755 # - this is a subrepo push
754 # - and the remote supports phases
756 # - and the remote supports phases
755 # - and no changesets are to be pushed
757 # - and no changesets are to be pushed
756 # - and the remote is publishing
758 # - and the remote is publishing
757 # We may be in the issue 3781 case!
759 # We may be in the issue 3781 case!
758 # We drop the phase synchronisation that is otherwise done as a
760 # We drop the phase synchronisation that is otherwise done as a
759 # courtesy, since it would publish changesets that are possibly
761 # courtesy, since it would publish changesets that are possibly
760 # still draft locally, based on the remote.
762 # still draft locally, based on the remote.
761 pushop.outdatedphases = []
763 pushop.outdatedphases = []
762 pushop.fallbackoutdatedphases = []
764 pushop.fallbackoutdatedphases = []
763 return
765 return
764
766
765 pushop.remotephases = phases.remotephasessummary(
767 pushop.remotephases = phases.remotephasessummary(
766 pushop.repo, pushop.fallbackheads, remotephases
768 pushop.repo, pushop.fallbackheads, remotephases
767 )
769 )
768 droots = pushop.remotephases.draftroots
770 droots = pushop.remotephases.draftroots
769
771
770 extracond = b''
772 extracond = b''
771 if not pushop.remotephases.publishing:
773 if not pushop.remotephases.publishing:
772 extracond = b' and public()'
774 extracond = b' and public()'
773 revset = b'heads((%%ln::%%ln) %s)' % extracond
775 revset = b'heads((%%ln::%%ln) %s)' % extracond
774 # Get the list of all revs that are draft on the remote but public here.
776 # Get the list of all revs that are draft on the remote but public here.
775 # XXX Beware that the revset breaks if droots is not strictly made of
777 # XXX Beware that the revset breaks if droots is not strictly made of
776 # XXX roots; we may want to ensure it is, but that is costly.
778 # XXX roots; we may want to ensure it is, but that is costly.
777 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
779 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
778 if not pushop.remotephases.publishing and pushop.publish:
780 if not pushop.remotephases.publishing and pushop.publish:
779 future = list(
781 future = list(
780 unfi.set(
782 unfi.set(
781 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
783 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
782 )
784 )
783 )
785 )
784 elif not outgoing.missing:
786 elif not outgoing.missing:
785 future = fallback
787 future = fallback
786 else:
788 else:
787 # adds the changesets we are going to push as draft
789 # adds the changesets we are going to push as draft
788 #
790 #
789 # should not be necessary for a publishing server, but because of an
791 # should not be necessary for a publishing server, but because of an
790 # issue fixed in xxxxx we have to do it anyway.
792 # issue fixed in xxxxx we have to do it anyway.
791 fdroots = list(
793 fdroots = list(
792 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
794 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
793 )
795 )
794 fdroots = [f.node() for f in fdroots]
796 fdroots = [f.node() for f in fdroots]
795 future = list(unfi.set(revset, fdroots, pushop.futureheads))
797 future = list(unfi.set(revset, fdroots, pushop.futureheads))
796 pushop.outdatedphases = future
798 pushop.outdatedphases = future
797 pushop.fallbackoutdatedphases = fallback
799 pushop.fallbackoutdatedphases = fallback
798
800
799
801
800 @pushdiscovery(b'obsmarker')
802 @pushdiscovery(b'obsmarker')
801 def _pushdiscoveryobsmarkers(pushop):
803 def _pushdiscoveryobsmarkers(pushop):
802 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
804 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
803 return
805 return
804
806
805 if not pushop.repo.obsstore:
807 if not pushop.repo.obsstore:
806 return
808 return
807
809
808 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
810 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
809 return
811 return
810
812
811 repo = pushop.repo
813 repo = pushop.repo
812 # very naive computation that can be quite expensive on big repos;
814 # very naive computation that can be quite expensive on big repos;
813 # however, evolution is currently slow on them anyway.
815 # however, evolution is currently slow on them anyway.
814 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
816 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
815 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
817 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
816
818
817
819
818 @pushdiscovery(b'bookmarks')
820 @pushdiscovery(b'bookmarks')
819 def _pushdiscoverybookmarks(pushop):
821 def _pushdiscoverybookmarks(pushop):
820 ui = pushop.ui
822 ui = pushop.ui
821 repo = pushop.repo.unfiltered()
823 repo = pushop.repo.unfiltered()
822 remote = pushop.remote
824 remote = pushop.remote
823 ui.debug(b"checking for updated bookmarks\n")
825 ui.debug(b"checking for updated bookmarks\n")
824 ancestors = ()
826 ancestors = ()
825 if pushop.revs:
827 if pushop.revs:
826 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
828 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
827 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
829 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
828
830
829 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
831 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
830
832
831 explicit = {
833 explicit = {
832 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
834 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
833 }
835 }
834
836
835 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
837 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
836 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
838 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
837
839
838
840
839 def _processcompared(pushop, pushed, explicit, remotebms, comp):
841 def _processcompared(pushop, pushed, explicit, remotebms, comp):
840 """take decision on bookmarks to push to the remote repo
842 """take decision on bookmarks to push to the remote repo
841
843
842 Exists to help extensions alter this behavior.
844 Exists to help extensions alter this behavior.
843 """
845 """
844 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
846 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
845
847
846 repo = pushop.repo
848 repo = pushop.repo
847
849
848 for b, scid, dcid in advsrc:
850 for b, scid, dcid in advsrc:
849 if b in explicit:
851 if b in explicit:
850 explicit.remove(b)
852 explicit.remove(b)
851 if not pushed or repo[scid].rev() in pushed:
853 if not pushed or repo[scid].rev() in pushed:
852 pushop.outbookmarks.append((b, dcid, scid))
854 pushop.outbookmarks.append((b, dcid, scid))
853 # search for added bookmarks
855 # search for added bookmarks
854 for b, scid, dcid in addsrc:
856 for b, scid, dcid in addsrc:
855 if b in explicit:
857 if b in explicit:
856 explicit.remove(b)
858 explicit.remove(b)
857 pushop.outbookmarks.append((b, b'', scid))
859 pushop.outbookmarks.append((b, b'', scid))
858 # search for overwritten bookmark
860 # search for overwritten bookmark
859 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
861 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
860 if b in explicit:
862 if b in explicit:
861 explicit.remove(b)
863 explicit.remove(b)
862 pushop.outbookmarks.append((b, dcid, scid))
864 pushop.outbookmarks.append((b, dcid, scid))
863 # search for bookmark to delete
865 # search for bookmark to delete
864 for b, scid, dcid in adddst:
866 for b, scid, dcid in adddst:
865 if b in explicit:
867 if b in explicit:
866 explicit.remove(b)
868 explicit.remove(b)
867 # treat as "deleted locally"
869 # treat as "deleted locally"
868 pushop.outbookmarks.append((b, dcid, b''))
870 pushop.outbookmarks.append((b, dcid, b''))
869 # identical bookmarks shouldn't get reported
871 # identical bookmarks shouldn't get reported
870 for b, scid, dcid in same:
872 for b, scid, dcid in same:
871 if b in explicit:
873 if b in explicit:
872 explicit.remove(b)
874 explicit.remove(b)
873
875
874 if explicit:
876 if explicit:
875 explicit = sorted(explicit)
877 explicit = sorted(explicit)
876 # we should probably list all of them
878 # we should probably list all of them
877 pushop.ui.warn(
879 pushop.ui.warn(
878 _(
880 _(
879 b'bookmark %s does not exist on the local '
881 b'bookmark %s does not exist on the local '
880 b'or remote repository!\n'
882 b'or remote repository!\n'
881 )
883 )
882 % explicit[0]
884 % explicit[0]
883 )
885 )
884 pushop.bkresult = 2
886 pushop.bkresult = 2
885
887
886 pushop.outbookmarks.sort()
888 pushop.outbookmarks.sort()
887
889
888
890
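# Editor's sketch (not part of Mercurial): the shape of the entries that
# _processcompared() above appends to pushop.outbookmarks. Each entry is
# (bookmark name, old node or b'', new node or b''); b'' on the old side means
# the bookmark is new on the remote, b'' on the new side means it is deleted.
# The node values below are placeholder 20-byte ids.
_old, _new = b'\x11' * 20, b'\x22' * 20
_example_outbookmarks = [
    (b'feature-x', b'', _new),    # export a new bookmark
    (b'moved-mark', _old, _new),  # update an existing bookmark
    (b'stale-mark', _old, b''),   # delete the remote bookmark
]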
889 def _pushcheckoutgoing(pushop):
891 def _pushcheckoutgoing(pushop):
890 outgoing = pushop.outgoing
892 outgoing = pushop.outgoing
891 unfi = pushop.repo.unfiltered()
893 unfi = pushop.repo.unfiltered()
892 if not outgoing.missing:
894 if not outgoing.missing:
893 # nothing to push
895 # nothing to push
894 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
896 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
895 return False
897 return False
896 # something to push
898 # something to push
897 if not pushop.force:
899 if not pushop.force:
898 # if repo.obsstore == False --> no obsolete
900 # if repo.obsstore == False --> no obsolete
899 # then, save the iteration
901 # then, save the iteration
900 if unfi.obsstore:
902 if unfi.obsstore:
901 # these messages are defined here to stay within the 80-char limit
903 # these messages are defined here to stay within the 80-char limit
902 mso = _(b"push includes obsolete changeset: %s!")
904 mso = _(b"push includes obsolete changeset: %s!")
903 mspd = _(b"push includes phase-divergent changeset: %s!")
905 mspd = _(b"push includes phase-divergent changeset: %s!")
904 mscd = _(b"push includes content-divergent changeset: %s!")
906 mscd = _(b"push includes content-divergent changeset: %s!")
905 mst = {
907 mst = {
906 b"orphan": _(b"push includes orphan changeset: %s!"),
908 b"orphan": _(b"push includes orphan changeset: %s!"),
907 b"phase-divergent": mspd,
909 b"phase-divergent": mspd,
908 b"content-divergent": mscd,
910 b"content-divergent": mscd,
909 }
911 }
910 # If we are about to push and there is at least one
912 # If we are about to push and there is at least one
911 # obsolete or unstable changeset in missing, then at
913 # obsolete or unstable changeset in missing, then at
912 # least one of the missing heads will be obsolete or
914 # least one of the missing heads will be obsolete or
913 # unstable. So checking only the heads is ok.
915 # unstable. So checking only the heads is ok.
914 for node in outgoing.missingheads:
916 for node in outgoing.missingheads:
915 ctx = unfi[node]
917 ctx = unfi[node]
916 if ctx.obsolete():
918 if ctx.obsolete():
917 raise error.Abort(mso % ctx)
919 raise error.Abort(mso % ctx)
918 elif ctx.isunstable():
920 elif ctx.isunstable():
919 # TODO print more than one instability in the abort
921 # TODO print more than one instability in the abort
920 # message
922 # message
921 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
923 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
922
924
923 discovery.checkheads(pushop)
925 discovery.checkheads(pushop)
924 return True
926 return True
925
927
926
928
927 # List of names of steps to perform for an outgoing bundle2, order matters.
929 # List of names of steps to perform for an outgoing bundle2, order matters.
928 b2partsgenorder = []
930 b2partsgenorder = []
929
931
930 # Mapping between step name and function
932 # Mapping between step name and function
931 #
933 #
932 # This exists to help extensions wrap steps if necessary
934 # This exists to help extensions wrap steps if necessary
933 b2partsgenmapping = {}
935 b2partsgenmapping = {}
934
936
935
937
936 def b2partsgenerator(stepname, idx=None):
938 def b2partsgenerator(stepname, idx=None):
937 """decorator for function generating bundle2 part
939 """decorator for function generating bundle2 part
938
940
939 The function is added to the step -> function mapping and appended to the
941 The function is added to the step -> function mapping and appended to the
940 list of steps. Beware that decorated functions will be added in order
942 list of steps. Beware that decorated functions will be added in order
941 (this may matter).
943 (this may matter).
942
944
943 You can only use this decorator for new steps; if you want to wrap a step
945 You can only use this decorator for new steps; if you want to wrap a step
944 from an extension, change the b2partsgenmapping dictionary directly."""
946 from an extension, change the b2partsgenmapping dictionary directly."""
945
947
946 def dec(func):
948 def dec(func):
947 assert stepname not in b2partsgenmapping
949 assert stepname not in b2partsgenmapping
948 b2partsgenmapping[stepname] = func
950 b2partsgenmapping[stepname] = func
949 if idx is None:
951 if idx is None:
950 b2partsgenorder.append(stepname)
952 b2partsgenorder.append(stepname)
951 else:
953 else:
952 b2partsgenorder.insert(idx, stepname)
954 b2partsgenorder.insert(idx, stepname)
953 return func
955 return func
954
956
955 return dec
957 return dec
956
958
957
959
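# Editor's sketch (not part of Mercurial): registering an extra bundle2 part
# generator with the decorator above. The step name, part name, and payload
# are hypothetical; 'idx' optionally controls where the step is inserted.
@b2partsgenerator(b'example-extension-part')
def _pushb2examplepart(pushop, bundler):
    if b'example-extension-part' in pushop.stepsdone:
        return
    pushop.stepsdone.add(b'example-extension-part')
    bundler.newpart(b'x-example', data=b'example payload')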
958 def _pushb2ctxcheckheads(pushop, bundler):
960 def _pushb2ctxcheckheads(pushop, bundler):
959 """Generate race condition checking parts
961 """Generate race condition checking parts
960
962
961 Exists as an independent function to aid extensions
963 Exists as an independent function to aid extensions
962 """
964 """
963 # * 'force' does not check for push races,
965 # * 'force' does not check for push races,
964 # * if we don't push anything, there is nothing to check.
966 # * if we don't push anything, there is nothing to check.
965 if not pushop.force and pushop.outgoing.missingheads:
967 if not pushop.force and pushop.outgoing.missingheads:
966 allowunrelated = b'related' in bundler.capabilities.get(
968 allowunrelated = b'related' in bundler.capabilities.get(
967 b'checkheads', ()
969 b'checkheads', ()
968 )
970 )
969 emptyremote = pushop.pushbranchmap is None
971 emptyremote = pushop.pushbranchmap is None
970 if not allowunrelated or emptyremote:
972 if not allowunrelated or emptyremote:
971 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
973 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
972 else:
974 else:
973 affected = set()
975 affected = set()
974 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
976 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
975 remoteheads, newheads, unsyncedheads, discardedheads = heads
977 remoteheads, newheads, unsyncedheads, discardedheads = heads
976 if remoteheads is not None:
978 if remoteheads is not None:
977 remote = set(remoteheads)
979 remote = set(remoteheads)
978 affected |= set(discardedheads) & remote
980 affected |= set(discardedheads) & remote
979 affected |= remote - set(newheads)
981 affected |= remote - set(newheads)
980 if affected:
982 if affected:
981 data = iter(sorted(affected))
983 data = iter(sorted(affected))
982 bundler.newpart(b'check:updated-heads', data=data)
984 bundler.newpart(b'check:updated-heads', data=data)
983
985
984
986
985 def _pushing(pushop):
987 def _pushing(pushop):
986 """return True if we are pushing anything"""
988 """return True if we are pushing anything"""
987 return bool(
989 return bool(
988 pushop.outgoing.missing
990 pushop.outgoing.missing
989 or pushop.outdatedphases
991 or pushop.outdatedphases
990 or pushop.outobsmarkers
992 or pushop.outobsmarkers
991 or pushop.outbookmarks
993 or pushop.outbookmarks
992 )
994 )
993
995
994
996
995 @b2partsgenerator(b'check-bookmarks')
997 @b2partsgenerator(b'check-bookmarks')
996 def _pushb2checkbookmarks(pushop, bundler):
998 def _pushb2checkbookmarks(pushop, bundler):
997 """insert bookmark move checking"""
999 """insert bookmark move checking"""
998 if not _pushing(pushop) or pushop.force:
1000 if not _pushing(pushop) or pushop.force:
999 return
1001 return
1000 b2caps = bundle2.bundle2caps(pushop.remote)
1002 b2caps = bundle2.bundle2caps(pushop.remote)
1001 hasbookmarkcheck = b'bookmarks' in b2caps
1003 hasbookmarkcheck = b'bookmarks' in b2caps
1002 if not (pushop.outbookmarks and hasbookmarkcheck):
1004 if not (pushop.outbookmarks and hasbookmarkcheck):
1003 return
1005 return
1004 data = []
1006 data = []
1005 for book, old, new in pushop.outbookmarks:
1007 for book, old, new in pushop.outbookmarks:
1006 data.append((book, old))
1008 data.append((book, old))
1007 checkdata = bookmod.binaryencode(data)
1009 checkdata = bookmod.binaryencode(data)
1008 bundler.newpart(b'check:bookmarks', data=checkdata)
1010 bundler.newpart(b'check:bookmarks', data=checkdata)
1009
1011
1010
1012
1011 @b2partsgenerator(b'check-phases')
1013 @b2partsgenerator(b'check-phases')
1012 def _pushb2checkphases(pushop, bundler):
1014 def _pushb2checkphases(pushop, bundler):
1013 """insert phase move checking"""
1015 """insert phase move checking"""
1014 if not _pushing(pushop) or pushop.force:
1016 if not _pushing(pushop) or pushop.force:
1015 return
1017 return
1016 b2caps = bundle2.bundle2caps(pushop.remote)
1018 b2caps = bundle2.bundle2caps(pushop.remote)
1017 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1019 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1018 if pushop.remotephases is not None and hasphaseheads:
1020 if pushop.remotephases is not None and hasphaseheads:
1019 # check that the remote phase has not changed
1021 # check that the remote phase has not changed
1020 checks = [[] for p in phases.allphases]
1022 checks = [[] for p in phases.allphases]
1021 checks[phases.public].extend(pushop.remotephases.publicheads)
1023 checks[phases.public].extend(pushop.remotephases.publicheads)
1022 checks[phases.draft].extend(pushop.remotephases.draftroots)
1024 checks[phases.draft].extend(pushop.remotephases.draftroots)
1023 if any(checks):
1025 if any(checks):
1024 for nodes in checks:
1026 for nodes in checks:
1025 nodes.sort()
1027 nodes.sort()
1026 checkdata = phases.binaryencode(checks)
1028 checkdata = phases.binaryencode(checks)
1027 bundler.newpart(b'check:phases', data=checkdata)
1029 bundler.newpart(b'check:phases', data=checkdata)
1028
1030
1029
1031
1030 @b2partsgenerator(b'changeset')
1032 @b2partsgenerator(b'changeset')
1031 def _pushb2ctx(pushop, bundler):
1033 def _pushb2ctx(pushop, bundler):
1032 """handle changegroup push through bundle2
1034 """handle changegroup push through bundle2
1033
1035
1034 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1036 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1035 """
1037 """
1036 if b'changesets' in pushop.stepsdone:
1038 if b'changesets' in pushop.stepsdone:
1037 return
1039 return
1038 pushop.stepsdone.add(b'changesets')
1040 pushop.stepsdone.add(b'changesets')
1039 # Send known heads to the server for race detection.
1041 # Send known heads to the server for race detection.
1040 if not _pushcheckoutgoing(pushop):
1042 if not _pushcheckoutgoing(pushop):
1041 return
1043 return
1042 pushop.repo.prepushoutgoinghooks(pushop)
1044 pushop.repo.prepushoutgoinghooks(pushop)
1043
1045
1044 _pushb2ctxcheckheads(pushop, bundler)
1046 _pushb2ctxcheckheads(pushop, bundler)
1045
1047
1046 b2caps = bundle2.bundle2caps(pushop.remote)
1048 b2caps = bundle2.bundle2caps(pushop.remote)
1047 version = b'01'
1049 version = b'01'
1048 cgversions = b2caps.get(b'changegroup')
1050 cgversions = b2caps.get(b'changegroup')
1049 if cgversions: # 3.1 and 3.2 ship with an empty value
1051 if cgversions: # 3.1 and 3.2 ship with an empty value
1050 cgversions = [
1052 cgversions = [
1051 v
1053 v
1052 for v in cgversions
1054 for v in cgversions
1053 if v in changegroup.supportedoutgoingversions(pushop.repo)
1055 if v in changegroup.supportedoutgoingversions(pushop.repo)
1054 ]
1056 ]
1055 if not cgversions:
1057 if not cgversions:
1056 raise error.Abort(_(b'no common changegroup version'))
1058 raise error.Abort(_(b'no common changegroup version'))
1057 version = max(cgversions)
1059 version = max(cgversions)
1058 cgstream = changegroup.makestream(
1060 cgstream = changegroup.makestream(
1059 pushop.repo, pushop.outgoing, version, b'push'
1061 pushop.repo, pushop.outgoing, version, b'push'
1060 )
1062 )
1061 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1063 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1062 if cgversions:
1064 if cgversions:
1063 cgpart.addparam(b'version', version)
1065 cgpart.addparam(b'version', version)
1064 if b'treemanifest' in pushop.repo.requirements:
1066 if b'treemanifest' in pushop.repo.requirements:
1065 cgpart.addparam(b'treemanifest', b'1')
1067 cgpart.addparam(b'treemanifest', b'1')
1066 if b'exp-sidedata-flag' in pushop.repo.requirements:
1068 if b'exp-sidedata-flag' in pushop.repo.requirements:
1067 cgpart.addparam(b'exp-sidedata', b'1')
1069 cgpart.addparam(b'exp-sidedata', b'1')
1068
1070
1069 def handlereply(op):
1071 def handlereply(op):
1070 """extract addchangegroup returns from server reply"""
1072 """extract addchangegroup returns from server reply"""
1071 cgreplies = op.records.getreplies(cgpart.id)
1073 cgreplies = op.records.getreplies(cgpart.id)
1072 assert len(cgreplies[b'changegroup']) == 1
1074 assert len(cgreplies[b'changegroup']) == 1
1073 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1075 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1074
1076
1075 return handlereply
1077 return handlereply
1076
1078
1077
1079
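# Editor's sketch (not part of Mercurial): the changegroup version negotiation
# performed in _pushb2ctx() above, reduced to plain Python. The server
# advertises the versions it accepts; the client keeps those it can produce
# and picks the highest, falling back to b'01' when nothing is advertised.
def _demo_pick_cgversion(serverversions, clientversions, default=b'01'):
    common = [v for v in serverversions if v in clientversions]
    if serverversions and not common:
        raise ValueError('no common changegroup version')
    return max(common) if common else default

# _demo_pick_cgversion([b'01', b'02'], {b'01', b'02', b'03'})  -> b'02'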
1078 @b2partsgenerator(b'phase')
1080 @b2partsgenerator(b'phase')
1079 def _pushb2phases(pushop, bundler):
1081 def _pushb2phases(pushop, bundler):
1080 """handle phase push through bundle2"""
1082 """handle phase push through bundle2"""
1081 if b'phases' in pushop.stepsdone:
1083 if b'phases' in pushop.stepsdone:
1082 return
1084 return
1083 b2caps = bundle2.bundle2caps(pushop.remote)
1085 b2caps = bundle2.bundle2caps(pushop.remote)
1084 ui = pushop.repo.ui
1086 ui = pushop.repo.ui
1085
1087
1086 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1088 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1087 haspushkey = b'pushkey' in b2caps
1089 haspushkey = b'pushkey' in b2caps
1088 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1090 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1089
1091
1090 if hasphaseheads and not legacyphase:
1092 if hasphaseheads and not legacyphase:
1091 return _pushb2phaseheads(pushop, bundler)
1093 return _pushb2phaseheads(pushop, bundler)
1092 elif haspushkey:
1094 elif haspushkey:
1093 return _pushb2phasespushkey(pushop, bundler)
1095 return _pushb2phasespushkey(pushop, bundler)
1094
1096
1095
1097
1096 def _pushb2phaseheads(pushop, bundler):
1098 def _pushb2phaseheads(pushop, bundler):
1097 """push phase information through a bundle2 - binary part"""
1099 """push phase information through a bundle2 - binary part"""
1098 pushop.stepsdone.add(b'phases')
1100 pushop.stepsdone.add(b'phases')
1099 if pushop.outdatedphases:
1101 if pushop.outdatedphases:
1100 updates = [[] for p in phases.allphases]
1102 updates = [[] for p in phases.allphases]
1101 updates[0].extend(h.node() for h in pushop.outdatedphases)
1103 updates[0].extend(h.node() for h in pushop.outdatedphases)
1102 phasedata = phases.binaryencode(updates)
1104 phasedata = phases.binaryencode(updates)
1103 bundler.newpart(b'phase-heads', data=phasedata)
1105 bundler.newpart(b'phase-heads', data=phasedata)
1104
1106
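# Explanatory note (added; not authoritative): `updates` above is indexed
# by phase number, so `updates[0]` collects the heads that should become
# public (phases.public is 0 in this codebase), and phases.binaryencode
# turns that per-phase list of nodes into the binary payload of the
# b'phase-heads' part.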
1105
1107
1106 def _pushb2phasespushkey(pushop, bundler):
1108 def _pushb2phasespushkey(pushop, bundler):
1107 """push phase information through a bundle2 - pushkey part"""
1109 """push phase information through a bundle2 - pushkey part"""
1108 pushop.stepsdone.add(b'phases')
1110 pushop.stepsdone.add(b'phases')
1109 part2node = []
1111 part2node = []
1110
1112
1111 def handlefailure(pushop, exc):
1113 def handlefailure(pushop, exc):
1112 targetid = int(exc.partid)
1114 targetid = int(exc.partid)
1113 for partid, node in part2node:
1115 for partid, node in part2node:
1114 if partid == targetid:
1116 if partid == targetid:
1115 raise error.Abort(_(b'updating %s to public failed') % node)
1117 raise error.Abort(_(b'updating %s to public failed') % node)
1116
1118
1117 enc = pushkey.encode
1119 enc = pushkey.encode
1118 for newremotehead in pushop.outdatedphases:
1120 for newremotehead in pushop.outdatedphases:
1119 part = bundler.newpart(b'pushkey')
1121 part = bundler.newpart(b'pushkey')
1120 part.addparam(b'namespace', enc(b'phases'))
1122 part.addparam(b'namespace', enc(b'phases'))
1121 part.addparam(b'key', enc(newremotehead.hex()))
1123 part.addparam(b'key', enc(newremotehead.hex()))
1122 part.addparam(b'old', enc(b'%d' % phases.draft))
1124 part.addparam(b'old', enc(b'%d' % phases.draft))
1123 part.addparam(b'new', enc(b'%d' % phases.public))
1125 part.addparam(b'new', enc(b'%d' % phases.public))
1124 part2node.append((part.id, newremotehead))
1126 part2node.append((part.id, newremotehead))
1125 pushop.pkfailcb[part.id] = handlefailure
1127 pushop.pkfailcb[part.id] = handlefailure
1126
1128
1127 def handlereply(op):
1129 def handlereply(op):
1128 for partid, node in part2node:
1130 for partid, node in part2node:
1129 partrep = op.records.getreplies(partid)
1131 partrep = op.records.getreplies(partid)
1130 results = partrep[b'pushkey']
1132 results = partrep[b'pushkey']
1131 assert len(results) <= 1
1133 assert len(results) <= 1
1132 msg = None
1134 msg = None
1133 if not results:
1135 if not results:
1134 msg = _(b'server ignored update of %s to public!\n') % node
1136 msg = _(b'server ignored update of %s to public!\n') % node
1135 elif not int(results[0][b'return']):
1137 elif not int(results[0][b'return']):
1136 msg = _(b'updating %s to public failed!\n') % node
1138 msg = _(b'updating %s to public failed!\n') % node
1137 if msg is not None:
1139 if msg is not None:
1138 pushop.ui.warn(msg)
1140 pushop.ui.warn(msg)
1139
1141
1140 return handlereply
1142 return handlereply
1141
1143
1142
1144
1143 @b2partsgenerator(b'obsmarkers')
1145 @b2partsgenerator(b'obsmarkers')
1144 def _pushb2obsmarkers(pushop, bundler):
1146 def _pushb2obsmarkers(pushop, bundler):
1145 if b'obsmarkers' in pushop.stepsdone:
1147 if b'obsmarkers' in pushop.stepsdone:
1146 return
1148 return
1147 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1149 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1148 if obsolete.commonversion(remoteversions) is None:
1150 if obsolete.commonversion(remoteversions) is None:
1149 return
1151 return
1150 pushop.stepsdone.add(b'obsmarkers')
1152 pushop.stepsdone.add(b'obsmarkers')
1151 if pushop.outobsmarkers:
1153 if pushop.outobsmarkers:
1152 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1154 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1153 bundle2.buildobsmarkerspart(bundler, markers)
1155 bundle2.buildobsmarkerspart(bundler, markers)
1154
1156
1155
1157
1156 @b2partsgenerator(b'bookmarks')
1158 @b2partsgenerator(b'bookmarks')
1157 def _pushb2bookmarks(pushop, bundler):
1159 def _pushb2bookmarks(pushop, bundler):
1158 """handle bookmark push through bundle2"""
1160 """handle bookmark push through bundle2"""
1159 if b'bookmarks' in pushop.stepsdone:
1161 if b'bookmarks' in pushop.stepsdone:
1160 return
1162 return
1161 b2caps = bundle2.bundle2caps(pushop.remote)
1163 b2caps = bundle2.bundle2caps(pushop.remote)
1162
1164
1163 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1165 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1164 legacybooks = b'bookmarks' in legacy
1166 legacybooks = b'bookmarks' in legacy
1165
1167
1166 if not legacybooks and b'bookmarks' in b2caps:
1168 if not legacybooks and b'bookmarks' in b2caps:
1167 return _pushb2bookmarkspart(pushop, bundler)
1169 return _pushb2bookmarkspart(pushop, bundler)
1168 elif b'pushkey' in b2caps:
1170 elif b'pushkey' in b2caps:
1169 return _pushb2bookmarkspushkey(pushop, bundler)
1171 return _pushb2bookmarkspushkey(pushop, bundler)
1170
1172
1171
1173
1172 def _bmaction(old, new):
1174 def _bmaction(old, new):
1173 """small utility for bookmark pushing"""
1175 """small utility for bookmark pushing"""
1174 if not old:
1176 if not old:
1175 return b'export'
1177 return b'export'
1176 elif not new:
1178 elif not new:
1177 return b'delete'
1179 return b'delete'
1178 return b'update'
1180 return b'update'
1179
1181
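# Illustration (added): with hypothetical `oldnode`/`newnode` values, the
# helper above maps old/new bookmark states to an action name:
#
#   _bmaction(None, newnode)     -> b'export'   (bookmark created remotely)
#   _bmaction(oldnode, None)     -> b'delete'   (bookmark removed remotely)
#   _bmaction(oldnode, newnode)  -> b'update'   (bookmark moved)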
1180
1182
1181 def _abortonsecretctx(pushop, node, b):
1183 def _abortonsecretctx(pushop, node, b):
1182 """abort if a given bookmark points to a secret changeset"""
1184 """abort if a given bookmark points to a secret changeset"""
1183 if node and pushop.repo[node].phase() == phases.secret:
1185 if node and pushop.repo[node].phase() == phases.secret:
1184 raise error.Abort(
1186 raise error.Abort(
1185 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1187 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1186 )
1188 )
1187
1189
1188
1190
1189 def _pushb2bookmarkspart(pushop, bundler):
1191 def _pushb2bookmarkspart(pushop, bundler):
1190 pushop.stepsdone.add(b'bookmarks')
1192 pushop.stepsdone.add(b'bookmarks')
1191 if not pushop.outbookmarks:
1193 if not pushop.outbookmarks:
1192 return
1194 return
1193
1195
1194 allactions = []
1196 allactions = []
1195 data = []
1197 data = []
1196 for book, old, new in pushop.outbookmarks:
1198 for book, old, new in pushop.outbookmarks:
1197 _abortonsecretctx(pushop, new, book)
1199 _abortonsecretctx(pushop, new, book)
1198 data.append((book, new))
1200 data.append((book, new))
1199 allactions.append((book, _bmaction(old, new)))
1201 allactions.append((book, _bmaction(old, new)))
1200 checkdata = bookmod.binaryencode(data)
1202 checkdata = bookmod.binaryencode(data)
1201 bundler.newpart(b'bookmarks', data=checkdata)
1203 bundler.newpart(b'bookmarks', data=checkdata)
1202
1204
1203 def handlereply(op):
1205 def handlereply(op):
1204 ui = pushop.ui
1206 ui = pushop.ui
1205 # if success
1207 # if success
1206 for book, action in allactions:
1208 for book, action in allactions:
1207 ui.status(bookmsgmap[action][0] % book)
1209 ui.status(bookmsgmap[action][0] % book)
1208
1210
1209 return handlereply
1211 return handlereply
1210
1212
1211
1213
1212 def _pushb2bookmarkspushkey(pushop, bundler):
1214 def _pushb2bookmarkspushkey(pushop, bundler):
1213 pushop.stepsdone.add(b'bookmarks')
1215 pushop.stepsdone.add(b'bookmarks')
1214 part2book = []
1216 part2book = []
1215 enc = pushkey.encode
1217 enc = pushkey.encode
1216
1218
1217 def handlefailure(pushop, exc):
1219 def handlefailure(pushop, exc):
1218 targetid = int(exc.partid)
1220 targetid = int(exc.partid)
1219 for partid, book, action in part2book:
1221 for partid, book, action in part2book:
1220 if partid == targetid:
1222 if partid == targetid:
1221 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1223 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1222 # we should not be called for a part we did not generate
1224 # we should not be called for a part we did not generate
1223 assert False
1225 assert False
1224
1226
1225 for book, old, new in pushop.outbookmarks:
1227 for book, old, new in pushop.outbookmarks:
1226 _abortonsecretctx(pushop, new, book)
1228 _abortonsecretctx(pushop, new, book)
1227 part = bundler.newpart(b'pushkey')
1229 part = bundler.newpart(b'pushkey')
1228 part.addparam(b'namespace', enc(b'bookmarks'))
1230 part.addparam(b'namespace', enc(b'bookmarks'))
1229 part.addparam(b'key', enc(book))
1231 part.addparam(b'key', enc(book))
1230 part.addparam(b'old', enc(hex(old)))
1232 part.addparam(b'old', enc(hex(old)))
1231 part.addparam(b'new', enc(hex(new)))
1233 part.addparam(b'new', enc(hex(new)))
1232 action = b'update'
1234 action = b'update'
1233 if not old:
1235 if not old:
1234 action = b'export'
1236 action = b'export'
1235 elif not new:
1237 elif not new:
1236 action = b'delete'
1238 action = b'delete'
1237 part2book.append((part.id, book, action))
1239 part2book.append((part.id, book, action))
1238 pushop.pkfailcb[part.id] = handlefailure
1240 pushop.pkfailcb[part.id] = handlefailure
1239
1241
1240 def handlereply(op):
1242 def handlereply(op):
1241 ui = pushop.ui
1243 ui = pushop.ui
1242 for partid, book, action in part2book:
1244 for partid, book, action in part2book:
1243 partrep = op.records.getreplies(partid)
1245 partrep = op.records.getreplies(partid)
1244 results = partrep[b'pushkey']
1246 results = partrep[b'pushkey']
1245 assert len(results) <= 1
1247 assert len(results) <= 1
1246 if not results:
1248 if not results:
1247 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1249 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1248 else:
1250 else:
1249 ret = int(results[0][b'return'])
1251 ret = int(results[0][b'return'])
1250 if ret:
1252 if ret:
1251 ui.status(bookmsgmap[action][0] % book)
1253 ui.status(bookmsgmap[action][0] % book)
1252 else:
1254 else:
1253 ui.warn(bookmsgmap[action][1] % book)
1255 ui.warn(bookmsgmap[action][1] % book)
1254 if pushop.bkresult is not None:
1256 if pushop.bkresult is not None:
1255 pushop.bkresult = 1
1257 pushop.bkresult = 1
1256
1258
1257 return handlereply
1259 return handlereply
1258
1260
1259
1261
1260 @b2partsgenerator(b'pushvars', idx=0)
1262 @b2partsgenerator(b'pushvars', idx=0)
1261 def _getbundlesendvars(pushop, bundler):
1263 def _getbundlesendvars(pushop, bundler):
1262 '''send shellvars via bundle2'''
1264 '''send shellvars via bundle2'''
1263 pushvars = pushop.pushvars
1265 pushvars = pushop.pushvars
1264 if pushvars:
1266 if pushvars:
1265 shellvars = {}
1267 shellvars = {}
1266 for raw in pushvars:
1268 for raw in pushvars:
1267 if b'=' not in raw:
1269 if b'=' not in raw:
1268 msg = (
1270 msg = (
1269 b"unable to parse variable '%s', should follow "
1271 b"unable to parse variable '%s', should follow "
1270 b"'KEY=VALUE' or 'KEY=' format"
1272 b"'KEY=VALUE' or 'KEY=' format"
1271 )
1273 )
1272 raise error.Abort(msg % raw)
1274 raise error.Abort(msg % raw)
1273 k, v = raw.split(b'=', 1)
1275 k, v = raw.split(b'=', 1)
1274 shellvars[k] = v
1276 shellvars[k] = v
1275
1277
1276 part = bundler.newpart(b'pushvars')
1278 part = bundler.newpart(b'pushvars')
1277
1279
1278 for key, value in pycompat.iteritems(shellvars):
1280 for key, value in pycompat.iteritems(shellvars):
1279 part.addparam(key, value, mandatory=False)
1281 part.addparam(key, value, mandatory=False)
1280
1282
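# Illustrative note (added; the variable names are invented): the
# b'pushvars' part built above carries `--pushvars KEY=VALUE` arguments
# from the client.  On the receiving side these are typically exposed to
# hooks as HG_USERVAR_<KEY> environment variables, so a push such as
#
#   $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
# would let a server-side hook inspect $HG_USERVAR_DEBUG and
# $HG_USERVAR_BYPASS_REVIEW.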
1281
1283
1282 def _pushbundle2(pushop):
1284 def _pushbundle2(pushop):
1283 """push data to the remote using bundle2
1285 """push data to the remote using bundle2
1284
1286
1285 The only currently supported type of data is changegroup but this will
1287 The only currently supported type of data is changegroup but this will
1286 evolve in the future."""
1288 evolve in the future."""
1287 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1289 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1288 pushback = pushop.trmanager and pushop.ui.configbool(
1290 pushback = pushop.trmanager and pushop.ui.configbool(
1289 b'experimental', b'bundle2.pushback'
1291 b'experimental', b'bundle2.pushback'
1290 )
1292 )
1291
1293
1292 # create reply capability
1294 # create reply capability
1293 capsblob = bundle2.encodecaps(
1295 capsblob = bundle2.encodecaps(
1294 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1296 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1295 )
1297 )
1296 bundler.newpart(b'replycaps', data=capsblob)
1298 bundler.newpart(b'replycaps', data=capsblob)
1297 replyhandlers = []
1299 replyhandlers = []
1298 for partgenname in b2partsgenorder:
1300 for partgenname in b2partsgenorder:
1299 partgen = b2partsgenmapping[partgenname]
1301 partgen = b2partsgenmapping[partgenname]
1300 ret = partgen(pushop, bundler)
1302 ret = partgen(pushop, bundler)
1301 if callable(ret):
1303 if callable(ret):
1302 replyhandlers.append(ret)
1304 replyhandlers.append(ret)
1303 # do not push if nothing to push
1305 # do not push if nothing to push
1304 if bundler.nbparts <= 1:
1306 if bundler.nbparts <= 1:
1305 return
1307 return
1306 stream = util.chunkbuffer(bundler.getchunks())
1308 stream = util.chunkbuffer(bundler.getchunks())
1307 try:
1309 try:
1308 try:
1310 try:
1309 with pushop.remote.commandexecutor() as e:
1311 with pushop.remote.commandexecutor() as e:
1310 reply = e.callcommand(
1312 reply = e.callcommand(
1311 b'unbundle',
1313 b'unbundle',
1312 {
1314 {
1313 b'bundle': stream,
1315 b'bundle': stream,
1314 b'heads': [b'force'],
1316 b'heads': [b'force'],
1315 b'url': pushop.remote.url(),
1317 b'url': pushop.remote.url(),
1316 },
1318 },
1317 ).result()
1319 ).result()
1318 except error.BundleValueError as exc:
1320 except error.BundleValueError as exc:
1319 raise error.Abort(_(b'missing support for %s') % exc)
1321 raise error.Abort(_(b'missing support for %s') % exc)
1320 try:
1322 try:
1321 trgetter = None
1323 trgetter = None
1322 if pushback:
1324 if pushback:
1323 trgetter = pushop.trmanager.transaction
1325 trgetter = pushop.trmanager.transaction
1324 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1326 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1325 except error.BundleValueError as exc:
1327 except error.BundleValueError as exc:
1326 raise error.Abort(_(b'missing support for %s') % exc)
1328 raise error.Abort(_(b'missing support for %s') % exc)
1327 except bundle2.AbortFromPart as exc:
1329 except bundle2.AbortFromPart as exc:
1328 pushop.ui.status(_(b'remote: %s\n') % exc)
1330 pushop.ui.status(_(b'remote: %s\n') % exc)
1329 if exc.hint is not None:
1331 if exc.hint is not None:
1330 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1332 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1331 raise error.Abort(_(b'push failed on remote'))
1333 raise error.Abort(_(b'push failed on remote'))
1332 except error.PushkeyFailed as exc:
1334 except error.PushkeyFailed as exc:
1333 partid = int(exc.partid)
1335 partid = int(exc.partid)
1334 if partid not in pushop.pkfailcb:
1336 if partid not in pushop.pkfailcb:
1335 raise
1337 raise
1336 pushop.pkfailcb[partid](pushop, exc)
1338 pushop.pkfailcb[partid](pushop, exc)
1337 for rephand in replyhandlers:
1339 for rephand in replyhandlers:
1338 rephand(op)
1340 rephand(op)
1339
1341
1340
1342
1341 def _pushchangeset(pushop):
1343 def _pushchangeset(pushop):
1342 """Make the actual push of changeset bundle to remote repo"""
1344 """Make the actual push of changeset bundle to remote repo"""
1343 if b'changesets' in pushop.stepsdone:
1345 if b'changesets' in pushop.stepsdone:
1344 return
1346 return
1345 pushop.stepsdone.add(b'changesets')
1347 pushop.stepsdone.add(b'changesets')
1346 if not _pushcheckoutgoing(pushop):
1348 if not _pushcheckoutgoing(pushop):
1347 return
1349 return
1348
1350
1349 # Should have verified this in push().
1351 # Should have verified this in push().
1350 assert pushop.remote.capable(b'unbundle')
1352 assert pushop.remote.capable(b'unbundle')
1351
1353
1352 pushop.repo.prepushoutgoinghooks(pushop)
1354 pushop.repo.prepushoutgoinghooks(pushop)
1353 outgoing = pushop.outgoing
1355 outgoing = pushop.outgoing
1354 # TODO: get bundlecaps from remote
1356 # TODO: get bundlecaps from remote
1355 bundlecaps = None
1357 bundlecaps = None
1356 # create a changegroup from local
1358 # create a changegroup from local
1357 if pushop.revs is None and not (
1359 if pushop.revs is None and not (
1358 outgoing.excluded or pushop.repo.changelog.filteredrevs
1360 outgoing.excluded or pushop.repo.changelog.filteredrevs
1359 ):
1361 ):
1360 # push everything,
1362 # push everything,
1361 # use the fast path, no race possible on push
1363 # use the fast path, no race possible on push
1362 cg = changegroup.makechangegroup(
1364 cg = changegroup.makechangegroup(
1363 pushop.repo,
1365 pushop.repo,
1364 outgoing,
1366 outgoing,
1365 b'01',
1367 b'01',
1366 b'push',
1368 b'push',
1367 fastpath=True,
1369 fastpath=True,
1368 bundlecaps=bundlecaps,
1370 bundlecaps=bundlecaps,
1369 )
1371 )
1370 else:
1372 else:
1371 cg = changegroup.makechangegroup(
1373 cg = changegroup.makechangegroup(
1372 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1374 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1373 )
1375 )
1374
1376
1375 # apply changegroup to remote
1377 # apply changegroup to remote
1376 # local repo finds heads on server, finds out what
1378 # local repo finds heads on server, finds out what
1377 # revs it must push. once revs transferred, if server
1379 # revs it must push. once revs transferred, if server
1378 # finds it has different heads (someone else won
1380 # finds it has different heads (someone else won
1379 # commit/push race), server aborts.
1381 # commit/push race), server aborts.
1380 if pushop.force:
1382 if pushop.force:
1381 remoteheads = [b'force']
1383 remoteheads = [b'force']
1382 else:
1384 else:
1383 remoteheads = pushop.remoteheads
1385 remoteheads = pushop.remoteheads
1384 # ssh: return remote's addchangegroup()
1386 # ssh: return remote's addchangegroup()
1385 # http: return remote's addchangegroup() or 0 for error
1387 # http: return remote's addchangegroup() or 0 for error
1386 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1388 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1387
1389
1388
1390
1389 def _pushsyncphase(pushop):
1391 def _pushsyncphase(pushop):
1390 """synchronise phase information locally and remotely"""
1392 """synchronise phase information locally and remotely"""
1391 cheads = pushop.commonheads
1393 cheads = pushop.commonheads
1392 # even when we don't push, exchanging phase data is useful
1394 # even when we don't push, exchanging phase data is useful
1393 remotephases = listkeys(pushop.remote, b'phases')
1395 remotephases = listkeys(pushop.remote, b'phases')
1394 if (
1396 if (
1395 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1397 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1396 and remotephases # server supports phases
1398 and remotephases # server supports phases
1397 and pushop.cgresult is None # nothing was pushed
1399 and pushop.cgresult is None # nothing was pushed
1398 and remotephases.get(b'publishing', False)
1400 and remotephases.get(b'publishing', False)
1399 ):
1401 ):
1400 # When:
1402 # When:
1401 # - this is a subrepo push
1403 # - this is a subrepo push
1402 # - and the remote supports phases
1404 # - and the remote supports phases
1403 # - and no changeset was pushed
1405 # - and no changeset was pushed
1404 # - and the remote is publishing
1406 # - and the remote is publishing
1405 # We may be in the issue 3871 case!
1407 # We may be in the issue 3871 case!
1406 # We drop the possible phase synchronisation done by
1408 # We drop the possible phase synchronisation done by
1407 # courtesy to publish changesets possibly locally draft
1409 # courtesy to publish changesets possibly locally draft
1408 # on the remote.
1410 # on the remote.
1409 remotephases = {b'publishing': b'True'}
1411 remotephases = {b'publishing': b'True'}
1410 if not remotephases: # old server or public only reply from non-publishing
1412 if not remotephases: # old server or public only reply from non-publishing
1411 _localphasemove(pushop, cheads)
1413 _localphasemove(pushop, cheads)
1412 # don't push any phase data as there is nothing to push
1414 # don't push any phase data as there is nothing to push
1413 else:
1415 else:
1414 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1416 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1415 pheads, droots = ana
1417 pheads, droots = ana
1416 ### Apply remote phase on local
1418 ### Apply remote phase on local
1417 if remotephases.get(b'publishing', False):
1419 if remotephases.get(b'publishing', False):
1418 _localphasemove(pushop, cheads)
1420 _localphasemove(pushop, cheads)
1419 else: # publish = False
1421 else: # publish = False
1420 _localphasemove(pushop, pheads)
1422 _localphasemove(pushop, pheads)
1421 _localphasemove(pushop, cheads, phases.draft)
1423 _localphasemove(pushop, cheads, phases.draft)
1422 ### Apply local phase on remote
1424 ### Apply local phase on remote
1423
1425
1424 if pushop.cgresult:
1426 if pushop.cgresult:
1425 if b'phases' in pushop.stepsdone:
1427 if b'phases' in pushop.stepsdone:
1426 # phases already pushed through bundle2
1428 # phases already pushed through bundle2
1427 return
1429 return
1428 outdated = pushop.outdatedphases
1430 outdated = pushop.outdatedphases
1429 else:
1431 else:
1430 outdated = pushop.fallbackoutdatedphases
1432 outdated = pushop.fallbackoutdatedphases
1431
1433
1432 pushop.stepsdone.add(b'phases')
1434 pushop.stepsdone.add(b'phases')
1433
1435
1434 # filter heads already turned public by the push
1436 # filter heads already turned public by the push
1435 outdated = [c for c in outdated if c.node() not in pheads]
1437 outdated = [c for c in outdated if c.node() not in pheads]
1436 # fallback to independent pushkey command
1438 # fallback to independent pushkey command
1437 for newremotehead in outdated:
1439 for newremotehead in outdated:
1438 with pushop.remote.commandexecutor() as e:
1440 with pushop.remote.commandexecutor() as e:
1439 r = e.callcommand(
1441 r = e.callcommand(
1440 b'pushkey',
1442 b'pushkey',
1441 {
1443 {
1442 b'namespace': b'phases',
1444 b'namespace': b'phases',
1443 b'key': newremotehead.hex(),
1445 b'key': newremotehead.hex(),
1444 b'old': b'%d' % phases.draft,
1446 b'old': b'%d' % phases.draft,
1445 b'new': b'%d' % phases.public,
1447 b'new': b'%d' % phases.public,
1446 },
1448 },
1447 ).result()
1449 ).result()
1448
1450
1449 if not r:
1451 if not r:
1450 pushop.ui.warn(
1452 pushop.ui.warn(
1451 _(b'updating %s to public failed!\n') % newremotehead
1453 _(b'updating %s to public failed!\n') % newremotehead
1452 )
1454 )
1453
1455
1454
1456
1455 def _localphasemove(pushop, nodes, phase=phases.public):
1457 def _localphasemove(pushop, nodes, phase=phases.public):
1456 """move <nodes> to <phase> in the local source repo"""
1458 """move <nodes> to <phase> in the local source repo"""
1457 if pushop.trmanager:
1459 if pushop.trmanager:
1458 phases.advanceboundary(
1460 phases.advanceboundary(
1459 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1461 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1460 )
1462 )
1461 else:
1463 else:
1462 # repo is not locked, do not change any phases!
1464 # repo is not locked, do not change any phases!
1463 # Inform the user that phases should have been moved when
1465 # Inform the user that phases should have been moved when
1464 # applicable.
1466 # applicable.
1465 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1467 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1466 phasestr = phases.phasenames[phase]
1468 phasestr = phases.phasenames[phase]
1467 if actualmoves:
1469 if actualmoves:
1468 pushop.ui.status(
1470 pushop.ui.status(
1469 _(
1471 _(
1470 b'cannot lock source repo, skipping '
1472 b'cannot lock source repo, skipping '
1471 b'local %s phase update\n'
1473 b'local %s phase update\n'
1472 )
1474 )
1473 % phasestr
1475 % phasestr
1474 )
1476 )
1475
1477
1476
1478
1477 def _pushobsolete(pushop):
1479 def _pushobsolete(pushop):
1478 """utility function to push obsolete markers to a remote"""
1480 """utility function to push obsolete markers to a remote"""
1479 if b'obsmarkers' in pushop.stepsdone:
1481 if b'obsmarkers' in pushop.stepsdone:
1480 return
1482 return
1481 repo = pushop.repo
1483 repo = pushop.repo
1482 remote = pushop.remote
1484 remote = pushop.remote
1483 pushop.stepsdone.add(b'obsmarkers')
1485 pushop.stepsdone.add(b'obsmarkers')
1484 if pushop.outobsmarkers:
1486 if pushop.outobsmarkers:
1485 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1487 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1486 rslts = []
1488 rslts = []
1487 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1489 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1488 remotedata = obsolete._pushkeyescape(markers)
1490 remotedata = obsolete._pushkeyescape(markers)
1489 for key in sorted(remotedata, reverse=True):
1491 for key in sorted(remotedata, reverse=True):
1490 # reverse sort to ensure we end with dump0
1492 # reverse sort to ensure we end with dump0
1491 data = remotedata[key]
1493 data = remotedata[key]
1492 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1494 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1493 if [r for r in rslts if not r]:
1495 if [r for r in rslts if not r]:
1494 msg = _(b'failed to push some obsolete markers!\n')
1496 msg = _(b'failed to push some obsolete markers!\n')
1495 repo.ui.warn(msg)
1497 repo.ui.warn(msg)
1496
1498
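# Sketch of the pushkey fallback used above (added; key layout as hinted
# by the "dump0" comment): obsolete._pushkeyescape splits the markers into
# a dict of chunks keyed b'dump0', b'dump1', ... so the loop is roughly
#
#   remotedata = obsolete._pushkeyescape(markers)
#   for key in sorted(remotedata, reverse=True):  # end with b'dump0'
#       remote.pushkey(b'obsolete', key, b'', remotedata[key])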
1497
1499
1498 def _pushbookmark(pushop):
1500 def _pushbookmark(pushop):
1499 """Update bookmark position on remote"""
1501 """Update bookmark position on remote"""
1500 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1502 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1501 return
1503 return
1502 pushop.stepsdone.add(b'bookmarks')
1504 pushop.stepsdone.add(b'bookmarks')
1503 ui = pushop.ui
1505 ui = pushop.ui
1504 remote = pushop.remote
1506 remote = pushop.remote
1505
1507
1506 for b, old, new in pushop.outbookmarks:
1508 for b, old, new in pushop.outbookmarks:
1507 action = b'update'
1509 action = b'update'
1508 if not old:
1510 if not old:
1509 action = b'export'
1511 action = b'export'
1510 elif not new:
1512 elif not new:
1511 action = b'delete'
1513 action = b'delete'
1512
1514
1513 with remote.commandexecutor() as e:
1515 with remote.commandexecutor() as e:
1514 r = e.callcommand(
1516 r = e.callcommand(
1515 b'pushkey',
1517 b'pushkey',
1516 {
1518 {
1517 b'namespace': b'bookmarks',
1519 b'namespace': b'bookmarks',
1518 b'key': b,
1520 b'key': b,
1519 b'old': hex(old),
1521 b'old': hex(old),
1520 b'new': hex(new),
1522 b'new': hex(new),
1521 },
1523 },
1522 ).result()
1524 ).result()
1523
1525
1524 if r:
1526 if r:
1525 ui.status(bookmsgmap[action][0] % b)
1527 ui.status(bookmsgmap[action][0] % b)
1526 else:
1528 else:
1527 ui.warn(bookmsgmap[action][1] % b)
1529 ui.warn(bookmsgmap[action][1] % b)
1529 # discovery can have set the value from an invalid entry
1531 # discovery can have set the value from an invalid entry
1529 if pushop.bkresult is not None:
1531 if pushop.bkresult is not None:
1530 pushop.bkresult = 1
1532 pushop.bkresult = 1
1531
1533
1532
1534
1533 class pulloperation(object):
1535 class pulloperation(object):
1534 """A object that represent a single pull operation
1536 """A object that represent a single pull operation
1535
1537
1536 It purpose is to carry pull related state and very common operation.
1538 It purpose is to carry pull related state and very common operation.
1537
1539
1538 A new should be created at the beginning of each pull and discarded
1540 A new should be created at the beginning of each pull and discarded
1539 afterward.
1541 afterward.
1540 """
1542 """
1541
1543
1542 def __init__(
1544 def __init__(
1543 self,
1545 self,
1544 repo,
1546 repo,
1545 remote,
1547 remote,
1546 heads=None,
1548 heads=None,
1547 force=False,
1549 force=False,
1548 bookmarks=(),
1550 bookmarks=(),
1549 remotebookmarks=None,
1551 remotebookmarks=None,
1550 streamclonerequested=None,
1552 streamclonerequested=None,
1551 includepats=None,
1553 includepats=None,
1552 excludepats=None,
1554 excludepats=None,
1553 depth=None,
1555 depth=None,
1554 ):
1556 ):
1555 # repo we pull into
1557 # repo we pull into
1556 self.repo = repo
1558 self.repo = repo
1557 # repo we pull from
1559 # repo we pull from
1558 self.remote = remote
1560 self.remote = remote
1559 # revision we try to pull (None is "all")
1561 # revision we try to pull (None is "all")
1560 self.heads = heads
1562 self.heads = heads
1561 # bookmark pulled explicitly
1563 # bookmark pulled explicitly
1562 self.explicitbookmarks = [
1564 self.explicitbookmarks = [
1563 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1565 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1564 ]
1566 ]
1565 # do we force pull?
1567 # do we force pull?
1566 self.force = force
1568 self.force = force
1567 # whether a streaming clone was requested
1569 # whether a streaming clone was requested
1568 self.streamclonerequested = streamclonerequested
1570 self.streamclonerequested = streamclonerequested
1569 # transaction manager
1571 # transaction manager
1570 self.trmanager = None
1572 self.trmanager = None
1571 # set of common changeset between local and remote before pull
1573 # set of common changeset between local and remote before pull
1572 self.common = None
1574 self.common = None
1573 # set of pulled head
1575 # set of pulled head
1574 self.rheads = None
1576 self.rheads = None
1575 # list of missing changeset to fetch remotely
1577 # list of missing changeset to fetch remotely
1576 self.fetch = None
1578 self.fetch = None
1577 # remote bookmarks data
1579 # remote bookmarks data
1578 self.remotebookmarks = remotebookmarks
1580 self.remotebookmarks = remotebookmarks
1579 # result of changegroup pulling (used as return code by pull)
1581 # result of changegroup pulling (used as return code by pull)
1580 self.cgresult = None
1582 self.cgresult = None
1581 # list of step already done
1583 # list of step already done
1582 self.stepsdone = set()
1584 self.stepsdone = set()
1583 # Whether we attempted a clone from pre-generated bundles.
1585 # Whether we attempted a clone from pre-generated bundles.
1584 self.clonebundleattempted = False
1586 self.clonebundleattempted = False
1585 # Set of file patterns to include.
1587 # Set of file patterns to include.
1586 self.includepats = includepats
1588 self.includepats = includepats
1587 # Set of file patterns to exclude.
1589 # Set of file patterns to exclude.
1588 self.excludepats = excludepats
1590 self.excludepats = excludepats
1589 # Number of ancestor changesets to pull from each pulled head.
1591 # Number of ancestor changesets to pull from each pulled head.
1590 self.depth = depth
1592 self.depth = depth
1591
1593
1592 @util.propertycache
1594 @util.propertycache
1593 def pulledsubset(self):
1595 def pulledsubset(self):
1594 """heads of the set of changeset target by the pull"""
1596 """heads of the set of changeset target by the pull"""
1595 # compute target subset
1597 # compute target subset
1596 if self.heads is None:
1598 if self.heads is None:
1597 # We pulled everything possible
1599 # We pulled everything possible
1598 # sync on everything common
1600 # sync on everything common
1599 c = set(self.common)
1601 c = set(self.common)
1600 ret = list(self.common)
1602 ret = list(self.common)
1601 for n in self.rheads:
1603 for n in self.rheads:
1602 if n not in c:
1604 if n not in c:
1603 ret.append(n)
1605 ret.append(n)
1604 return ret
1606 return ret
1605 else:
1607 else:
1606 # We pulled a specific subset
1608 # We pulled a specific subset
1607 # sync on this subset
1609 # sync on this subset
1608 return self.heads
1610 return self.heads
1609
1611
1610 @util.propertycache
1612 @util.propertycache
1611 def canusebundle2(self):
1613 def canusebundle2(self):
1612 return not _forcebundle1(self)
1614 return not _forcebundle1(self)
1613
1615
1614 @util.propertycache
1616 @util.propertycache
1615 def remotebundle2caps(self):
1617 def remotebundle2caps(self):
1616 return bundle2.bundle2caps(self.remote)
1618 return bundle2.bundle2caps(self.remote)
1617
1619
1618 def gettransaction(self):
1620 def gettransaction(self):
1619 # deprecated; talk to trmanager directly
1621 # deprecated; talk to trmanager directly
1620 return self.trmanager.transaction()
1622 return self.trmanager.transaction()
1621
1623
1622
1624
1623 class transactionmanager(util.transactional):
1625 class transactionmanager(util.transactional):
1624 """An object to manage the life cycle of a transaction
1626 """An object to manage the life cycle of a transaction
1625
1627
1626 It creates the transaction on demand and calls the appropriate hooks when
1628 It creates the transaction on demand and calls the appropriate hooks when
1627 closing the transaction."""
1629 closing the transaction."""
1628
1630
1629 def __init__(self, repo, source, url):
1631 def __init__(self, repo, source, url):
1630 self.repo = repo
1632 self.repo = repo
1631 self.source = source
1633 self.source = source
1632 self.url = url
1634 self.url = url
1633 self._tr = None
1635 self._tr = None
1634
1636
1635 def transaction(self):
1637 def transaction(self):
1636 """Return an open transaction object, constructing if necessary"""
1638 """Return an open transaction object, constructing if necessary"""
1637 if not self._tr:
1639 if not self._tr:
1638 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1640 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1639 self._tr = self.repo.transaction(trname)
1641 self._tr = self.repo.transaction(trname)
1640 self._tr.hookargs[b'source'] = self.source
1642 self._tr.hookargs[b'source'] = self.source
1641 self._tr.hookargs[b'url'] = self.url
1643 self._tr.hookargs[b'url'] = self.url
1642 return self._tr
1644 return self._tr
1643
1645
1644 def close(self):
1646 def close(self):
1645 """close transaction if created"""
1647 """close transaction if created"""
1646 if self._tr is not None:
1648 if self._tr is not None:
1647 self._tr.close()
1649 self._tr.close()
1648
1650
1649 def release(self):
1651 def release(self):
1650 """release transaction if created"""
1652 """release transaction if created"""
1651 if self._tr is not None:
1653 if self._tr is not None:
1652 self._tr.release()
1654 self._tr.release()
1653
1655
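# Sketch of the intended lifecycle (added; the context-manager behaviour
# comes from util.transactional and is assumed to close on success and
# release otherwise), mirroring how pull() below uses it:
#
#   trmanager = transactionmanager(repo, b'pull', remote.url())
#   with trmanager:
#       tr = trmanager.transaction()  # created lazily on first use
#       # ... apply incoming data inside `tr` ...
#   # on normal exit the transaction is closed; on error it is released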
1654
1656
1655 def listkeys(remote, namespace):
1657 def listkeys(remote, namespace):
1656 with remote.commandexecutor() as e:
1658 with remote.commandexecutor() as e:
1657 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1659 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1658
1660
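# For illustration only (added; the exact keys depend on the server): a
# b'phases' listkeys reply is a bytes-keyed dict, typically containing
# b'publishing' plus hex-node entries describing non-public changesets,
# e.g.
#
#   remotephases = listkeys(pullop.remote, b'phases')
#   # -> {b'publishing': b'True'} on a publishing server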
1659
1661
1660 def _fullpullbundle2(repo, pullop):
1662 def _fullpullbundle2(repo, pullop):
1661 # The server may send a partial reply, i.e. when inlining
1663 # The server may send a partial reply, i.e. when inlining
1662 # pre-computed bundles. In that case, update the common
1664 # pre-computed bundles. In that case, update the common
1663 # set based on the results and pull another bundle.
1665 # set based on the results and pull another bundle.
1664 #
1666 #
1665 # There are two indicators that the process is finished:
1667 # There are two indicators that the process is finished:
1666 # - no changeset has been added, or
1668 # - no changeset has been added, or
1667 # - all remote heads are known locally.
1669 # - all remote heads are known locally.
1668 # The head check must use the unfiltered view as obsoletion
1670 # The head check must use the unfiltered view as obsoletion
1669 # markers can hide heads.
1671 # markers can hide heads.
1670 unfi = repo.unfiltered()
1672 unfi = repo.unfiltered()
1671 unficl = unfi.changelog
1673 unficl = unfi.changelog
1672
1674
1673 def headsofdiff(h1, h2):
1675 def headsofdiff(h1, h2):
1674 """Returns heads(h1 % h2)"""
1676 """Returns heads(h1 % h2)"""
1675 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1677 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1676 return set(ctx.node() for ctx in res)
1678 return set(ctx.node() for ctx in res)
1677
1679
1678 def headsofunion(h1, h2):
1680 def headsofunion(h1, h2):
1679 """Returns heads((h1 + h2) - null)"""
1681 """Returns heads((h1 + h2) - null)"""
1680 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1682 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1681 return set(ctx.node() for ctx in res)
1683 return set(ctx.node() for ctx in res)
1682
1684
1683 while True:
1685 while True:
1684 old_heads = unficl.heads()
1686 old_heads = unficl.heads()
1685 clstart = len(unficl)
1687 clstart = len(unficl)
1686 _pullbundle2(pullop)
1688 _pullbundle2(pullop)
1687 if repository.NARROW_REQUIREMENT in repo.requirements:
1689 if repository.NARROW_REQUIREMENT in repo.requirements:
1688 # XXX narrow clones filter the heads on the server side during
1690 # XXX narrow clones filter the heads on the server side during
1689 # XXX getbundle and result in partial replies as well.
1691 # XXX getbundle and result in partial replies as well.
1690 # XXX Disable pull bundles in this case as band aid to avoid
1692 # XXX Disable pull bundles in this case as band aid to avoid
1691 # XXX extra round trips.
1693 # XXX extra round trips.
1692 break
1694 break
1693 if clstart == len(unficl):
1695 if clstart == len(unficl):
1694 break
1696 break
1695 if all(unficl.hasnode(n) for n in pullop.rheads):
1697 if all(unficl.hasnode(n) for n in pullop.rheads):
1696 break
1698 break
1697 new_heads = headsofdiff(unficl.heads(), old_heads)
1699 new_heads = headsofdiff(unficl.heads(), old_heads)
1698 pullop.common = headsofunion(new_heads, pullop.common)
1700 pullop.common = headsofunion(new_heads, pullop.common)
1699 pullop.rheads = set(pullop.rheads) - pullop.common
1701 pullop.rheads = set(pullop.rheads) - pullop.common
1700
1702
1701
1703
1702 def pull(
1704 def pull(
1703 repo,
1705 repo,
1704 remote,
1706 remote,
1705 heads=None,
1707 heads=None,
1706 force=False,
1708 force=False,
1707 bookmarks=(),
1709 bookmarks=(),
1708 opargs=None,
1710 opargs=None,
1709 streamclonerequested=None,
1711 streamclonerequested=None,
1710 includepats=None,
1712 includepats=None,
1711 excludepats=None,
1713 excludepats=None,
1712 depth=None,
1714 depth=None,
1713 ):
1715 ):
1714 """Fetch repository data from a remote.
1716 """Fetch repository data from a remote.
1715
1717
1716 This is the main function used to retrieve data from a remote repository.
1718 This is the main function used to retrieve data from a remote repository.
1717
1719
1718 ``repo`` is the local repository to clone into.
1720 ``repo`` is the local repository to clone into.
1719 ``remote`` is a peer instance.
1721 ``remote`` is a peer instance.
1720 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1722 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1721 default) means to pull everything from the remote.
1723 default) means to pull everything from the remote.
1722 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1724 ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
1723 default, all remote bookmarks are pulled.
1725 default, all remote bookmarks are pulled.
1724 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1726 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1725 initialization.
1727 initialization.
1726 ``streamclonerequested`` is a boolean indicating whether a "streaming
1728 ``streamclonerequested`` is a boolean indicating whether a "streaming
1727 clone" is requested. A "streaming clone" is essentially a raw file copy
1729 clone" is requested. A "streaming clone" is essentially a raw file copy
1728 of revlogs from the server. This only works when the local repository is
1730 of revlogs from the server. This only works when the local repository is
1729 empty. The default value of ``None`` means to respect the server
1731 empty. The default value of ``None`` means to respect the server
1730 configuration for preferring stream clones.
1732 configuration for preferring stream clones.
1731 ``includepats`` and ``excludepats`` define explicit file patterns to
1733 ``includepats`` and ``excludepats`` define explicit file patterns to
1732 include and exclude in storage, respectively. If not defined, narrow
1734 include and exclude in storage, respectively. If not defined, narrow
1733 patterns from the repo instance are used, if available.
1735 patterns from the repo instance are used, if available.
1734 ``depth`` is an integer indicating the DAG depth of history we're
1736 ``depth`` is an integer indicating the DAG depth of history we're
1735 interested in. If defined, for each revision specified in ``heads``, we
1737 interested in. If defined, for each revision specified in ``heads``, we
1736 will fetch up to this many of its ancestors and data associated with them.
1738 will fetch up to this many of its ancestors and data associated with them.
1737
1739
1738 Returns the ``pulloperation`` created for this pull.
1740 Returns the ``pulloperation`` created for this pull.
1739 """
1741 """
1740 if opargs is None:
1742 if opargs is None:
1741 opargs = {}
1743 opargs = {}
1742
1744
1743 # We allow the narrow patterns to be passed in explicitly to provide more
1745 # We allow the narrow patterns to be passed in explicitly to provide more
1744 # flexibility for API consumers.
1746 # flexibility for API consumers.
1745 if includepats or excludepats:
1747 if includepats or excludepats:
1746 includepats = includepats or set()
1748 includepats = includepats or set()
1747 excludepats = excludepats or set()
1749 excludepats = excludepats or set()
1748 else:
1750 else:
1749 includepats, excludepats = repo.narrowpats
1751 includepats, excludepats = repo.narrowpats
1750
1752
1751 narrowspec.validatepatterns(includepats)
1753 narrowspec.validatepatterns(includepats)
1752 narrowspec.validatepatterns(excludepats)
1754 narrowspec.validatepatterns(excludepats)
1753
1755
1754 pullop = pulloperation(
1756 pullop = pulloperation(
1755 repo,
1757 repo,
1756 remote,
1758 remote,
1757 heads,
1759 heads,
1758 force,
1760 force,
1759 bookmarks=bookmarks,
1761 bookmarks=bookmarks,
1760 streamclonerequested=streamclonerequested,
1762 streamclonerequested=streamclonerequested,
1761 includepats=includepats,
1763 includepats=includepats,
1762 excludepats=excludepats,
1764 excludepats=excludepats,
1763 depth=depth,
1765 depth=depth,
1764 **pycompat.strkwargs(opargs)
1766 **pycompat.strkwargs(opargs)
1765 )
1767 )
1766
1768
1767 peerlocal = pullop.remote.local()
1769 peerlocal = pullop.remote.local()
1768 if peerlocal:
1770 if peerlocal:
1769 missing = set(peerlocal.requirements) - pullop.repo.supported
1771 missing = set(peerlocal.requirements) - pullop.repo.supported
1770 if missing:
1772 if missing:
1771 msg = _(
1773 msg = _(
1772 b"required features are not"
1774 b"required features are not"
1773 b" supported in the destination:"
1775 b" supported in the destination:"
1774 b" %s"
1776 b" %s"
1775 ) % (b', '.join(sorted(missing)))
1777 ) % (b', '.join(sorted(missing)))
1776 raise error.Abort(msg)
1778 raise error.Abort(msg)
1777
1779
1778 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1780 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1779 wlock = util.nullcontextmanager()
1781 wlock = util.nullcontextmanager()
1780 if not bookmod.bookmarksinstore(repo):
1782 if not bookmod.bookmarksinstore(repo):
1781 wlock = repo.wlock()
1783 wlock = repo.wlock()
1782 with wlock, repo.lock(), pullop.trmanager:
1784 with wlock, repo.lock(), pullop.trmanager:
1783 # Use the modern wire protocol, if available.
1785 # Use the modern wire protocol, if available.
1784 if remote.capable(b'command-changesetdata'):
1786 if remote.capable(b'command-changesetdata'):
1785 exchangev2.pull(pullop)
1787 exchangev2.pull(pullop)
1786 else:
1788 else:
1787 # This should ideally be in _pullbundle2(). However, it needs to run
1789 # This should ideally be in _pullbundle2(). However, it needs to run
1788 # before discovery to avoid extra work.
1790 # before discovery to avoid extra work.
1789 _maybeapplyclonebundle(pullop)
1791 _maybeapplyclonebundle(pullop)
1790 streamclone.maybeperformlegacystreamclone(pullop)
1792 streamclone.maybeperformlegacystreamclone(pullop)
1791 _pulldiscovery(pullop)
1793 _pulldiscovery(pullop)
1792 if pullop.canusebundle2:
1794 if pullop.canusebundle2:
1793 _fullpullbundle2(repo, pullop)
1795 _fullpullbundle2(repo, pullop)
1794 _pullchangeset(pullop)
1796 _pullchangeset(pullop)
1795 _pullphase(pullop)
1797 _pullphase(pullop)
1796 _pullbookmarks(pullop)
1798 _pullbookmarks(pullop)
1797 _pullobsolete(pullop)
1799 _pullobsolete(pullop)
1798
1800
1799 # storing remotenames
1801 # storing remotenames
1800 if repo.ui.configbool(b'experimental', b'remotenames'):
1802 if repo.ui.configbool(b'experimental', b'remotenames'):
1801 logexchange.pullremotenames(repo, remote)
1803 logexchange.pullremotenames(repo, remote)
1802
1804
1803 return pullop
1805 return pullop
1804
1806
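# A minimal usage sketch (added; assumed caller, not part of this module):
# a command or extension holding a repo could drive a full pull roughly
# like this.  hg.peer and ui.expandpath come from mercurial.hg and the ui
# object; error handling is elided.
#
#   from mercurial import exchange, hg
#
#   def pulleverything(ui, repo, source=b'default'):
#       other = hg.peer(repo, {}, ui.expandpath(source))
#       try:
#           pullop = exchange.pull(repo, other, heads=None, force=False)
#           return pullop.cgresult
#       finally:
#           other.close()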
1805
1807
1806 # list of steps to perform discovery before pull
1808 # list of steps to perform discovery before pull
1807 pulldiscoveryorder = []
1809 pulldiscoveryorder = []
1808
1810
1809 # Mapping between step name and function
1811 # Mapping between step name and function
1810 #
1812 #
1811 # This exists to help extensions wrap steps if necessary
1813 # This exists to help extensions wrap steps if necessary
1812 pulldiscoverymapping = {}
1814 pulldiscoverymapping = {}
1813
1815
1814
1816
1815 def pulldiscovery(stepname):
1817 def pulldiscovery(stepname):
1816 """decorator for function performing discovery before pull
1818 """decorator for function performing discovery before pull
1817
1819
1818 The function is added to the step -> function mapping and appended to the
1820 The function is added to the step -> function mapping and appended to the
1819 list of steps. Beware that decorated functions will be added in order (this
1821 list of steps. Beware that decorated functions will be added in order (this
1820 may matter).
1822 may matter).
1821
1823
1822 You can only use this decorator for a new step; if you want to wrap a step
1824 You can only use this decorator for a new step; if you want to wrap a step
1823 from an extension, change the pulldiscoverymapping dictionary directly."""
1825 from an extension, change the pulldiscoverymapping dictionary directly."""
1824
1826
1825 def dec(func):
1827 def dec(func):
1826 assert stepname not in pulldiscoverymapping
1828 assert stepname not in pulldiscoverymapping
1827 pulldiscoverymapping[stepname] = func
1829 pulldiscoverymapping[stepname] = func
1828 pulldiscoveryorder.append(stepname)
1830 pulldiscoveryorder.append(stepname)
1829 return func
1831 return func
1830
1832
1831 return dec
1833 return dec
1832
1834
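# For illustration only (added; the step name and body are invented): an
# extension could register an additional discovery step with the decorator
# above.  The real registered steps follow later in this module.
#
#   @pulldiscovery(b'example-step')
#   def _pulldiscoveryexample(pullop):
#       pullop.repo.ui.debug(b'example discovery step ran\n')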
1833
1835
1834 def _pulldiscovery(pullop):
1836 def _pulldiscovery(pullop):
1835 """Run all discovery steps"""
1837 """Run all discovery steps"""
1836 for stepname in pulldiscoveryorder:
1838 for stepname in pulldiscoveryorder:
1837 step = pulldiscoverymapping[stepname]
1839 step = pulldiscoverymapping[stepname]
1838 step(pullop)
1840 step(pullop)
1839
1841
1840
1842
1841 @pulldiscovery(b'b1:bookmarks')
1843 @pulldiscovery(b'b1:bookmarks')
1842 def _pullbookmarkbundle1(pullop):
1844 def _pullbookmarkbundle1(pullop):
1843 """fetch bookmark data in bundle1 case
1845 """fetch bookmark data in bundle1 case
1844
1846
1845 If not using bundle2, we have to fetch bookmarks before changeset
1847 If not using bundle2, we have to fetch bookmarks before changeset
1846 discovery to reduce the chance and impact of race conditions."""
1848 discovery to reduce the chance and impact of race conditions."""
1847 if pullop.remotebookmarks is not None:
1849 if pullop.remotebookmarks is not None:
1848 return
1850 return
1849 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1851 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1850 # all known bundle2 servers now support listkeys, but let's be nice with
1852 # all known bundle2 servers now support listkeys, but let's be nice with
1851 # new implementations.
1853 # new implementations.
1852 return
1854 return
1853 books = listkeys(pullop.remote, b'bookmarks')
1855 books = listkeys(pullop.remote, b'bookmarks')
1854 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1856 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1855
1857
1856
1858
1857 @pulldiscovery(b'changegroup')
1859 @pulldiscovery(b'changegroup')
1858 def _pulldiscoverychangegroup(pullop):
1860 def _pulldiscoverychangegroup(pullop):
1859 """discovery phase for the pull
1861 """discovery phase for the pull
1860
1862
1861 Currently handles changeset discovery only; will be changed to handle all
1863 Currently handles changeset discovery only; will be changed to handle all
1862 discovery at some point."""
1864 discovery at some point."""
1863 tmp = discovery.findcommonincoming(
1865 tmp = discovery.findcommonincoming(
1864 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1866 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1865 )
1867 )
1866 common, fetch, rheads = tmp
1868 common, fetch, rheads = tmp
1867 has_node = pullop.repo.unfiltered().changelog.index.has_node
1869 has_node = pullop.repo.unfiltered().changelog.index.has_node
1868 if fetch and rheads:
1870 if fetch and rheads:
1869 # If a remote head is filtered locally, put it back in common.
1871 # If a remote head is filtered locally, put it back in common.
1870 #
1872 #
1871 # This is a hackish solution to catch most "common but locally
1873 # This is a hackish solution to catch most "common but locally
1872 # hidden" situations. We do not perform discovery on the unfiltered
1874 # hidden" situations. We do not perform discovery on the unfiltered
1873 # repository because it ends up doing a pathological amount of round
1875 # repository because it ends up doing a pathological amount of round
1874 # trips for a huge number of changesets we do not care about.
1876 # trips for a huge number of changesets we do not care about.
1875 #
1877 #
1876 # If a set of such "common but filtered" changesets exists on the server
1878 # If a set of such "common but filtered" changesets exists on the server
1877 # but does not include a remote head, we will not be able to detect it,
1879 # but does not include a remote head, we will not be able to detect it,
1878 scommon = set(common)
1880 scommon = set(common)
1879 for n in rheads:
1881 for n in rheads:
1880 if has_node(n):
1882 if has_node(n):
1881 if n not in scommon:
1883 if n not in scommon:
1882 common.append(n)
1884 common.append(n)
1883 if set(rheads).issubset(set(common)):
1885 if set(rheads).issubset(set(common)):
1884 fetch = []
1886 fetch = []
1885 pullop.common = common
1887 pullop.common = common
1886 pullop.fetch = fetch
1888 pullop.fetch = fetch
1887 pullop.rheads = rheads
1889 pullop.rheads = rheads
1888
1890
1889
1891
1890 def _pullbundle2(pullop):
1892 def _pullbundle2(pullop):
1891 """pull data using bundle2
1893 """pull data using bundle2
1892
1894
1893 For now, the only supported data are changegroup."""
1895 For now, the only supported data are changegroup."""
1894 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1896 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1895
1897
1896 # make ui easier to access
1898 # make ui easier to access
1897 ui = pullop.repo.ui
1899 ui = pullop.repo.ui
1898
1900
1899 # At the moment we don't do stream clones over bundle2. If that is
1901 # At the moment we don't do stream clones over bundle2. If that is
1900 # implemented then here's where the check for that will go.
1902 # implemented then here's where the check for that will go.
1901 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1903 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1902
1904
1903 # declare pull perimeters
1905 # declare pull perimeters
1904 kwargs[b'common'] = pullop.common
1906 kwargs[b'common'] = pullop.common
1905 kwargs[b'heads'] = pullop.heads or pullop.rheads
1907 kwargs[b'heads'] = pullop.heads or pullop.rheads
1906
1908
1907 # check whether the server supports narrow and, if so, add includepats and excludepats
1909 # check whether the server supports narrow and, if so, add includepats and excludepats
1908 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1910 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1909 if servernarrow and pullop.includepats:
1911 if servernarrow and pullop.includepats:
1910 kwargs[b'includepats'] = pullop.includepats
1912 kwargs[b'includepats'] = pullop.includepats
1911 if servernarrow and pullop.excludepats:
1913 if servernarrow and pullop.excludepats:
1912 kwargs[b'excludepats'] = pullop.excludepats
1914 kwargs[b'excludepats'] = pullop.excludepats
1913
1915
1914 if streaming:
1916 if streaming:
1915 kwargs[b'cg'] = False
1917 kwargs[b'cg'] = False
1916 kwargs[b'stream'] = True
1918 kwargs[b'stream'] = True
1917 pullop.stepsdone.add(b'changegroup')
1919 pullop.stepsdone.add(b'changegroup')
1918 pullop.stepsdone.add(b'phases')
1920 pullop.stepsdone.add(b'phases')
1919
1921
1920 else:
1922 else:
1921 # pulling changegroup
1923 # pulling changegroup
1922 pullop.stepsdone.add(b'changegroup')
1924 pullop.stepsdone.add(b'changegroup')
1923
1925
1924 kwargs[b'cg'] = pullop.fetch
1926 kwargs[b'cg'] = pullop.fetch
1925
1927
1926 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1928 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1927 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1929 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1928 if not legacyphase and hasbinaryphase:
1930 if not legacyphase and hasbinaryphase:
1929 kwargs[b'phases'] = True
1931 kwargs[b'phases'] = True
1930 pullop.stepsdone.add(b'phases')
1932 pullop.stepsdone.add(b'phases')
1931
1933
1932 if b'listkeys' in pullop.remotebundle2caps:
1934 if b'listkeys' in pullop.remotebundle2caps:
1933 if b'phases' not in pullop.stepsdone:
1935 if b'phases' not in pullop.stepsdone:
1934 kwargs[b'listkeys'] = [b'phases']
1936 kwargs[b'listkeys'] = [b'phases']
1935
1937
1936 bookmarksrequested = False
1938 bookmarksrequested = False
1937 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1939 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1938 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1940 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1939
1941
1940 if pullop.remotebookmarks is not None:
1942 if pullop.remotebookmarks is not None:
1941 pullop.stepsdone.add(b'request-bookmarks')
1943 pullop.stepsdone.add(b'request-bookmarks')
1942
1944
1943 if (
1945 if (
1944 b'request-bookmarks' not in pullop.stepsdone
1946 b'request-bookmarks' not in pullop.stepsdone
1945 and pullop.remotebookmarks is None
1947 and pullop.remotebookmarks is None
1946 and not legacybookmark
1948 and not legacybookmark
1947 and hasbinarybook
1949 and hasbinarybook
1948 ):
1950 ):
1949 kwargs[b'bookmarks'] = True
1951 kwargs[b'bookmarks'] = True
1950 bookmarksrequested = True
1952 bookmarksrequested = True
1951
1953
1952 if b'listkeys' in pullop.remotebundle2caps:
1954 if b'listkeys' in pullop.remotebundle2caps:
1953 if b'request-bookmarks' not in pullop.stepsdone:
1955 if b'request-bookmarks' not in pullop.stepsdone:
1954 # make sure to always include bookmark data when migrating
1956 # make sure to always include bookmark data when migrating
1955 # `hg incoming --bundle` to using this function.
1957 # `hg incoming --bundle` to using this function.
1956 pullop.stepsdone.add(b'request-bookmarks')
1958 pullop.stepsdone.add(b'request-bookmarks')
1957 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1959 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1958
1960
1959 # If this is a full pull / clone and the server supports the clone bundles
1961 # If this is a full pull / clone and the server supports the clone bundles
1960 # feature, tell the server whether we attempted a clone bundle. The
1962 # feature, tell the server whether we attempted a clone bundle. The
1961 # presence of this flag indicates the client supports clone bundles. This
1963 # presence of this flag indicates the client supports clone bundles. This
1962 # will enable the server to treat clients that support clone bundles
1964 # will enable the server to treat clients that support clone bundles
1963 # differently from those that don't.
1965 # differently from those that don't.
1964 if (
1966 if (
1965 pullop.remote.capable(b'clonebundles')
1967 pullop.remote.capable(b'clonebundles')
1966 and pullop.heads is None
1968 and pullop.heads is None
1967 and list(pullop.common) == [nullid]
1969 and list(pullop.common) == [nullid]
1968 ):
1970 ):
1969 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1971 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1970
1972
1971 if streaming:
1973 if streaming:
1972 pullop.repo.ui.status(_(b'streaming all changes\n'))
1974 pullop.repo.ui.status(_(b'streaming all changes\n'))
1973 elif not pullop.fetch:
1975 elif not pullop.fetch:
1974 pullop.repo.ui.status(_(b"no changes found\n"))
1976 pullop.repo.ui.status(_(b"no changes found\n"))
1975 pullop.cgresult = 0
1977 pullop.cgresult = 0
1976 else:
1978 else:
1977 if pullop.heads is None and list(pullop.common) == [nullid]:
1979 if pullop.heads is None and list(pullop.common) == [nullid]:
1978 pullop.repo.ui.status(_(b"requesting all changes\n"))
1980 pullop.repo.ui.status(_(b"requesting all changes\n"))
1979 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1981 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1980 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1982 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1981 if obsolete.commonversion(remoteversions) is not None:
1983 if obsolete.commonversion(remoteversions) is not None:
1982 kwargs[b'obsmarkers'] = True
1984 kwargs[b'obsmarkers'] = True
1983 pullop.stepsdone.add(b'obsmarkers')
1985 pullop.stepsdone.add(b'obsmarkers')
1984 _pullbundle2extraprepare(pullop, kwargs)
1986 _pullbundle2extraprepare(pullop, kwargs)
1985
1987
1986 with pullop.remote.commandexecutor() as e:
1988 with pullop.remote.commandexecutor() as e:
1987 args = dict(kwargs)
1989 args = dict(kwargs)
1988 args[b'source'] = b'pull'
1990 args[b'source'] = b'pull'
1989 bundle = e.callcommand(b'getbundle', args).result()
1991 bundle = e.callcommand(b'getbundle', args).result()
1990
1992
1991 try:
1993 try:
1992 op = bundle2.bundleoperation(
1994 op = bundle2.bundleoperation(
1993 pullop.repo, pullop.gettransaction, source=b'pull'
1995 pullop.repo, pullop.gettransaction, source=b'pull'
1994 )
1996 )
1995 op.modes[b'bookmarks'] = b'records'
1997 op.modes[b'bookmarks'] = b'records'
1996 bundle2.processbundle(pullop.repo, bundle, op=op)
1998 bundle2.processbundle(pullop.repo, bundle, op=op)
1997 except bundle2.AbortFromPart as exc:
1999 except bundle2.AbortFromPart as exc:
1998 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2000 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1999 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2001 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2000 except error.BundleValueError as exc:
2002 except error.BundleValueError as exc:
2001 raise error.Abort(_(b'missing support for %s') % exc)
2003 raise error.Abort(_(b'missing support for %s') % exc)
2002
2004
2003 if pullop.fetch:
2005 if pullop.fetch:
2004 pullop.cgresult = bundle2.combinechangegroupresults(op)
2006 pullop.cgresult = bundle2.combinechangegroupresults(op)
2005
2007
2006 # processing phases change
2008 # processing phases change
2007 for namespace, value in op.records[b'listkeys']:
2009 for namespace, value in op.records[b'listkeys']:
2008 if namespace == b'phases':
2010 if namespace == b'phases':
2009 _pullapplyphases(pullop, value)
2011 _pullapplyphases(pullop, value)
2010
2012
2011 # processing bookmark update
2013 # processing bookmark update
2012 if bookmarksrequested:
2014 if bookmarksrequested:
2013 books = {}
2015 books = {}
2014 for record in op.records[b'bookmarks']:
2016 for record in op.records[b'bookmarks']:
2015 books[record[b'bookmark']] = record[b"node"]
2017 books[record[b'bookmark']] = record[b"node"]
2016 pullop.remotebookmarks = books
2018 pullop.remotebookmarks = books
2017 else:
2019 else:
2018 for namespace, value in op.records[b'listkeys']:
2020 for namespace, value in op.records[b'listkeys']:
2019 if namespace == b'bookmarks':
2021 if namespace == b'bookmarks':
2020 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2022 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2021
2023
2022 # bookmark data were either already there or pulled in the bundle
2024 # bookmark data were either already there or pulled in the bundle
2023 if pullop.remotebookmarks is not None:
2025 if pullop.remotebookmarks is not None:
2024 _pullbookmarks(pullop)
2026 _pullbookmarks(pullop)
2025
2027
2026
2028
2027 def _pullbundle2extraprepare(pullop, kwargs):
2029 def _pullbundle2extraprepare(pullop, kwargs):
2028 """hook function so that extensions can extend the getbundle call"""
2030 """hook function so that extensions can extend the getbundle call"""
2029
2031
2030
2032
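Extensions usually hook this by wrapping the function; a minimal sketch, assuming a hypothetical extension with an extsetup entry point and an illustrative b'myext-flag' getbundle argument:

from mercurial import exchange, extensions

def _extraprepare(orig, pullop, kwargs):
    # hypothetical extra getbundle argument added on every pull
    kwargs[b'myext-flag'] = True
    return orig(pullop, kwargs)

def extsetup(ui):
    extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _extraprepare)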
2031 def _pullchangeset(pullop):
2033 def _pullchangeset(pullop):
2032 """pull changeset from unbundle into the local repo"""
2034 """pull changeset from unbundle into the local repo"""
2033 # We delay opening the transaction as late as possible, so that we
2035 # We delay opening the transaction as late as possible, so that we
2034 # don't open a transaction for nothing and don't break otherwise useful
2036 # don't open a transaction for nothing and don't break otherwise useful
2035 # rollback calls
2037 # rollback calls
2036 if b'changegroup' in pullop.stepsdone:
2038 if b'changegroup' in pullop.stepsdone:
2037 return
2039 return
2038 pullop.stepsdone.add(b'changegroup')
2040 pullop.stepsdone.add(b'changegroup')
2039 if not pullop.fetch:
2041 if not pullop.fetch:
2040 pullop.repo.ui.status(_(b"no changes found\n"))
2042 pullop.repo.ui.status(_(b"no changes found\n"))
2041 pullop.cgresult = 0
2043 pullop.cgresult = 0
2042 return
2044 return
2043 tr = pullop.gettransaction()
2045 tr = pullop.gettransaction()
2044 if pullop.heads is None and list(pullop.common) == [nullid]:
2046 if pullop.heads is None and list(pullop.common) == [nullid]:
2045 pullop.repo.ui.status(_(b"requesting all changes\n"))
2047 pullop.repo.ui.status(_(b"requesting all changes\n"))
2046 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2048 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2047 # issue1320, avoid a race if remote changed after discovery
2049 # issue1320, avoid a race if remote changed after discovery
2048 pullop.heads = pullop.rheads
2050 pullop.heads = pullop.rheads
2049
2051
2050 if pullop.remote.capable(b'getbundle'):
2052 if pullop.remote.capable(b'getbundle'):
2051 # TODO: get bundlecaps from remote
2053 # TODO: get bundlecaps from remote
2052 cg = pullop.remote.getbundle(
2054 cg = pullop.remote.getbundle(
2053 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2055 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2054 )
2056 )
2055 elif pullop.heads is None:
2057 elif pullop.heads is None:
2056 with pullop.remote.commandexecutor() as e:
2058 with pullop.remote.commandexecutor() as e:
2057 cg = e.callcommand(
2059 cg = e.callcommand(
2058 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2060 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2059 ).result()
2061 ).result()
2060
2062
2061 elif not pullop.remote.capable(b'changegroupsubset'):
2063 elif not pullop.remote.capable(b'changegroupsubset'):
2062 raise error.Abort(
2064 raise error.Abort(
2063 _(
2065 _(
2064 b"partial pull cannot be done because "
2066 b"partial pull cannot be done because "
2065 b"other repository doesn't support "
2067 b"other repository doesn't support "
2066 b"changegroupsubset."
2068 b"changegroupsubset."
2067 )
2069 )
2068 )
2070 )
2069 else:
2071 else:
2070 with pullop.remote.commandexecutor() as e:
2072 with pullop.remote.commandexecutor() as e:
2071 cg = e.callcommand(
2073 cg = e.callcommand(
2072 b'changegroupsubset',
2074 b'changegroupsubset',
2073 {
2075 {
2074 b'bases': pullop.fetch,
2076 b'bases': pullop.fetch,
2075 b'heads': pullop.heads,
2077 b'heads': pullop.heads,
2076 b'source': b'pull',
2078 b'source': b'pull',
2077 },
2079 },
2078 ).result()
2080 ).result()
2079
2081
2080 bundleop = bundle2.applybundle(
2082 bundleop = bundle2.applybundle(
2081 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2083 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2082 )
2084 )
2083 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2085 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2084
2086
2085
2087
2086 def _pullphase(pullop):
2088 def _pullphase(pullop):
2087 # Get remote phases data from remote
2089 # Get remote phases data from remote
2088 if b'phases' in pullop.stepsdone:
2090 if b'phases' in pullop.stepsdone:
2089 return
2091 return
2090 remotephases = listkeys(pullop.remote, b'phases')
2092 remotephases = listkeys(pullop.remote, b'phases')
2091 _pullapplyphases(pullop, remotephases)
2093 _pullapplyphases(pullop, remotephases)
2092
2094
2093
2095
2094 def _pullapplyphases(pullop, remotephases):
2096 def _pullapplyphases(pullop, remotephases):
2095 """apply phase movement from observed remote state"""
2097 """apply phase movement from observed remote state"""
2096 if b'phases' in pullop.stepsdone:
2098 if b'phases' in pullop.stepsdone:
2097 return
2099 return
2098 pullop.stepsdone.add(b'phases')
2100 pullop.stepsdone.add(b'phases')
2099 publishing = bool(remotephases.get(b'publishing', False))
2101 publishing = bool(remotephases.get(b'publishing', False))
2100 if remotephases and not publishing:
2102 if remotephases and not publishing:
2101 # remote is new and non-publishing
2103 # remote is new and non-publishing
2102 pheads, _dr = phases.analyzeremotephases(
2104 pheads, _dr = phases.analyzeremotephases(
2103 pullop.repo, pullop.pulledsubset, remotephases
2105 pullop.repo, pullop.pulledsubset, remotephases
2104 )
2106 )
2105 dheads = pullop.pulledsubset
2107 dheads = pullop.pulledsubset
2106 else:
2108 else:
2107 # Remote is old or publishing; all common changesets
2109 # Remote is old or publishing; all common changesets
2108 # should be seen as public
2110 # should be seen as public
2109 pheads = pullop.pulledsubset
2111 pheads = pullop.pulledsubset
2110 dheads = []
2112 dheads = []
2111 unfi = pullop.repo.unfiltered()
2113 unfi = pullop.repo.unfiltered()
2112 phase = unfi._phasecache.phase
2114 phase = unfi._phasecache.phase
2113 rev = unfi.changelog.index.get_rev
2115 rev = unfi.changelog.index.get_rev
2114 public = phases.public
2116 public = phases.public
2115 draft = phases.draft
2117 draft = phases.draft
2116
2118
2117 # exclude changesets already public locally and update the others
2119 # exclude changesets already public locally and update the others
2118 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2120 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2119 if pheads:
2121 if pheads:
2120 tr = pullop.gettransaction()
2122 tr = pullop.gettransaction()
2121 phases.advanceboundary(pullop.repo, tr, public, pheads)
2123 phases.advanceboundary(pullop.repo, tr, public, pheads)
2122
2124
2123 # exclude changesets already draft locally and update the others
2125 # exclude changesets already draft locally and update the others
2124 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2126 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2125 if dheads:
2127 if dheads:
2126 tr = pullop.gettransaction()
2128 tr = pullop.gettransaction()
2127 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2129 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2128
2130
2129
2131
2130 def _pullbookmarks(pullop):
2132 def _pullbookmarks(pullop):
2131 """process the remote bookmark information to update the local one"""
2133 """process the remote bookmark information to update the local one"""
2132 if b'bookmarks' in pullop.stepsdone:
2134 if b'bookmarks' in pullop.stepsdone:
2133 return
2135 return
2134 pullop.stepsdone.add(b'bookmarks')
2136 pullop.stepsdone.add(b'bookmarks')
2135 repo = pullop.repo
2137 repo = pullop.repo
2136 remotebookmarks = pullop.remotebookmarks
2138 remotebookmarks = pullop.remotebookmarks
2137 bookmod.updatefromremote(
2139 bookmod.updatefromremote(
2138 repo.ui,
2140 repo.ui,
2139 repo,
2141 repo,
2140 remotebookmarks,
2142 remotebookmarks,
2141 pullop.remote.url(),
2143 pullop.remote.url(),
2142 pullop.gettransaction,
2144 pullop.gettransaction,
2143 explicit=pullop.explicitbookmarks,
2145 explicit=pullop.explicitbookmarks,
2144 )
2146 )
2145
2147
2146
2148
2147 def _pullobsolete(pullop):
2149 def _pullobsolete(pullop):
2148 """utility function to pull obsolete markers from a remote
2150 """utility function to pull obsolete markers from a remote
2149
2151
2150 The `gettransaction` is a function that returns the pull transaction, creating
2152 The `gettransaction` is a function that returns the pull transaction, creating
2151 one if necessary. We return the transaction to inform the calling code that
2153 one if necessary. We return the transaction to inform the calling code that
2152 a new transaction has been created (when applicable).
2154 a new transaction has been created (when applicable).
2153
2155
2154 Exists mostly to allow overriding for experimentation purposes"""
2156 Exists mostly to allow overriding for experimentation purposes"""
2155 if b'obsmarkers' in pullop.stepsdone:
2157 if b'obsmarkers' in pullop.stepsdone:
2156 return
2158 return
2157 pullop.stepsdone.add(b'obsmarkers')
2159 pullop.stepsdone.add(b'obsmarkers')
2158 tr = None
2160 tr = None
2159 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2161 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2160 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2162 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2161 remoteobs = listkeys(pullop.remote, b'obsolete')
2163 remoteobs = listkeys(pullop.remote, b'obsolete')
2162 if b'dump0' in remoteobs:
2164 if b'dump0' in remoteobs:
2163 tr = pullop.gettransaction()
2165 tr = pullop.gettransaction()
2164 markers = []
2166 markers = []
2165 for key in sorted(remoteobs, reverse=True):
2167 for key in sorted(remoteobs, reverse=True):
2166 if key.startswith(b'dump'):
2168 if key.startswith(b'dump'):
2167 data = util.b85decode(remoteobs[key])
2169 data = util.b85decode(remoteobs[key])
2168 version, newmarks = obsolete._readmarkers(data)
2170 version, newmarks = obsolete._readmarkers(data)
2169 markers += newmarks
2171 markers += newmarks
2170 if markers:
2172 if markers:
2171 pullop.repo.obsstore.add(tr, markers)
2173 pullop.repo.obsstore.add(tr, markers)
2172 pullop.repo.invalidatevolatilesets()
2174 pullop.repo.invalidatevolatilesets()
2173 return tr
2175 return tr
2174
2176
2175
2177
2176 def applynarrowacl(repo, kwargs):
2178 def applynarrowacl(repo, kwargs):
2177 """Apply narrow fetch access control.
2179 """Apply narrow fetch access control.
2178
2180
2179 This massages the named arguments for getbundle wire protocol commands
2181 This massages the named arguments for getbundle wire protocol commands
2180 so requested data is filtered through access control rules.
2182 so requested data is filtered through access control rules.
2181 """
2183 """
2182 ui = repo.ui
2184 ui = repo.ui
2183 # TODO this assumes existence of HTTP and is a layering violation.
2185 # TODO this assumes existence of HTTP and is a layering violation.
2184 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2186 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2185 user_includes = ui.configlist(
2187 user_includes = ui.configlist(
2186 _NARROWACL_SECTION,
2188 _NARROWACL_SECTION,
2187 username + b'.includes',
2189 username + b'.includes',
2188 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2190 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2189 )
2191 )
2190 user_excludes = ui.configlist(
2192 user_excludes = ui.configlist(
2191 _NARROWACL_SECTION,
2193 _NARROWACL_SECTION,
2192 username + b'.excludes',
2194 username + b'.excludes',
2193 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2195 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2194 )
2196 )
2195 if not user_includes:
2197 if not user_includes:
2196 raise error.Abort(
2198 raise error.Abort(
2197 _(b"%s configuration for user %s is empty")
2199 _(b"%s configuration for user %s is empty")
2198 % (_NARROWACL_SECTION, username)
2200 % (_NARROWACL_SECTION, username)
2199 )
2201 )
2200
2202
2201 user_includes = [
2203 user_includes = [
2202 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2204 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2203 ]
2205 ]
2204 user_excludes = [
2206 user_excludes = [
2205 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2207 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2206 ]
2208 ]
2207
2209
2208 req_includes = set(kwargs.get('includepats', []))
2210 req_includes = set(kwargs.get('includepats', []))
2209 req_excludes = set(kwargs.get('excludepats', []))
2211 req_excludes = set(kwargs.get('excludepats', []))
2210
2212
2211 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2213 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2212 req_includes, req_excludes, user_includes, user_excludes
2214 req_includes, req_excludes, user_includes, user_excludes
2213 )
2215 )
2214
2216
2215 if invalid_includes:
2217 if invalid_includes:
2216 raise error.Abort(
2218 raise error.Abort(
2217 _(b"The following includes are not accessible for %s: %s")
2219 _(b"The following includes are not accessible for %s: %s")
2218 % (username, stringutil.pprint(invalid_includes))
2220 % (username, stringutil.pprint(invalid_includes))
2219 )
2221 )
2220
2222
2221 new_args = {}
2223 new_args = {}
2222 new_args.update(kwargs)
2224 new_args.update(kwargs)
2223 new_args['narrow'] = True
2225 new_args['narrow'] = True
2224 new_args['narrow_acl'] = True
2226 new_args['narrow_acl'] = True
2225 new_args['includepats'] = req_includes
2227 new_args['includepats'] = req_includes
2226 if req_excludes:
2228 if req_excludes:
2227 new_args['excludepats'] = req_excludes
2229 new_args['excludepats'] = req_excludes
2228
2230
2229 return new_args
2231 return new_args
2230
2232
2231
2233
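The rules consulted above live in the hgrc section named by _NARROWACL_SECTION (defined earlier in this module, outside this hunk); a hedged configuration sketch, assuming that section is spelled narrowacl and a user named alice:

[narrowacl]
default.includes = *
alice.includes = src/core, docs
alice.excludes = src/core/secrets

Per the normalization above, '*' becomes the 'path:.' pattern and every other entry becomes a 'path:' pattern before the request is restricted.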
2232 def _computeellipsis(repo, common, heads, known, match, depth=None):
2234 def _computeellipsis(repo, common, heads, known, match, depth=None):
2233 """Compute the shape of a narrowed DAG.
2235 """Compute the shape of a narrowed DAG.
2234
2236
2235 Args:
2237 Args:
2236 repo: The repository we're transferring.
2238 repo: The repository we're transferring.
2237 common: The roots of the DAG range we're transferring.
2239 common: The roots of the DAG range we're transferring.
2238 May be just [nullid], which means all ancestors of heads.
2240 May be just [nullid], which means all ancestors of heads.
2239 heads: The heads of the DAG range we're transferring.
2241 heads: The heads of the DAG range we're transferring.
2240 match: The narrowmatcher that allows us to identify relevant changes.
2242 match: The narrowmatcher that allows us to identify relevant changes.
2241 depth: If not None, only consider nodes to be full nodes if they are at
2243 depth: If not None, only consider nodes to be full nodes if they are at
2242 most depth changesets away from one of heads.
2244 most depth changesets away from one of heads.
2243
2245
2244 Returns:
2246 Returns:
2245 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2247 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2246
2248
2247 visitnodes: The list of nodes (either full or ellipsis) which
2249 visitnodes: The list of nodes (either full or ellipsis) which
2248 need to be sent to the client.
2250 need to be sent to the client.
2249 relevant_nodes: The set of changelog nodes which change a file inside
2251 relevant_nodes: The set of changelog nodes which change a file inside
2250 the narrowspec. The client needs these as non-ellipsis nodes.
2252 the narrowspec. The client needs these as non-ellipsis nodes.
2251 ellipsisroots: A dict of {rev: parents} that is used in
2253 ellipsisroots: A dict of {rev: parents} that is used in
2252 narrowchangegroup to produce ellipsis nodes with the
2254 narrowchangegroup to produce ellipsis nodes with the
2253 correct parents.
2255 correct parents.
2254 """
2256 """
2255 cl = repo.changelog
2257 cl = repo.changelog
2256 mfl = repo.manifestlog
2258 mfl = repo.manifestlog
2257
2259
2258 clrev = cl.rev
2260 clrev = cl.rev
2259
2261
2260 commonrevs = {clrev(n) for n in common} | {nullrev}
2262 commonrevs = {clrev(n) for n in common} | {nullrev}
2261 headsrevs = {clrev(n) for n in heads}
2263 headsrevs = {clrev(n) for n in heads}
2262
2264
2263 if depth:
2265 if depth:
2264 revdepth = {h: 0 for h in headsrevs}
2266 revdepth = {h: 0 for h in headsrevs}
2265
2267
2266 ellipsisheads = collections.defaultdict(set)
2268 ellipsisheads = collections.defaultdict(set)
2267 ellipsisroots = collections.defaultdict(set)
2269 ellipsisroots = collections.defaultdict(set)
2268
2270
2269 def addroot(head, curchange):
2271 def addroot(head, curchange):
2270 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2272 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2271 ellipsisroots[head].add(curchange)
2273 ellipsisroots[head].add(curchange)
2272 # Recursively split ellipsis heads with 3 roots by finding the
2274 # Recursively split ellipsis heads with 3 roots by finding the
2273 # roots' youngest common descendant which is an elided merge commit.
2275 # roots' youngest common descendant which is an elided merge commit.
2274 # That descendant takes 2 of the 3 roots as its own, and becomes a
2276 # That descendant takes 2 of the 3 roots as its own, and becomes a
2275 # root of the head.
2277 # root of the head.
2276 while len(ellipsisroots[head]) > 2:
2278 while len(ellipsisroots[head]) > 2:
2277 child, roots = splithead(head)
2279 child, roots = splithead(head)
2278 splitroots(head, child, roots)
2280 splitroots(head, child, roots)
2279 head = child # Recurse in case we just added a 3rd root
2281 head = child # Recurse in case we just added a 3rd root
2280
2282
2281 def splitroots(head, child, roots):
2283 def splitroots(head, child, roots):
2282 ellipsisroots[head].difference_update(roots)
2284 ellipsisroots[head].difference_update(roots)
2283 ellipsisroots[head].add(child)
2285 ellipsisroots[head].add(child)
2284 ellipsisroots[child].update(roots)
2286 ellipsisroots[child].update(roots)
2285 ellipsisroots[child].discard(child)
2287 ellipsisroots[child].discard(child)
2286
2288
2287 def splithead(head):
2289 def splithead(head):
2288 r1, r2, r3 = sorted(ellipsisroots[head])
2290 r1, r2, r3 = sorted(ellipsisroots[head])
2289 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2291 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2290 mid = repo.revs(
2292 mid = repo.revs(
2291 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2293 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2292 )
2294 )
2293 for j in mid:
2295 for j in mid:
2294 if j == nr2:
2296 if j == nr2:
2295 return nr2, (nr1, nr2)
2297 return nr2, (nr1, nr2)
2296 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2298 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2297 return j, (nr1, nr2)
2299 return j, (nr1, nr2)
2298 raise error.Abort(
2300 raise error.Abort(
2299 _(
2301 _(
2300 b'Failed to split up ellipsis node! head: %d, '
2302 b'Failed to split up ellipsis node! head: %d, '
2301 b'roots: %d %d %d'
2303 b'roots: %d %d %d'
2302 )
2304 )
2303 % (head, r1, r2, r3)
2305 % (head, r1, r2, r3)
2304 )
2306 )
2305
2307
2306 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2308 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2307 visit = reversed(missing)
2309 visit = reversed(missing)
2308 relevant_nodes = set()
2310 relevant_nodes = set()
2309 visitnodes = [cl.node(m) for m in missing]
2311 visitnodes = [cl.node(m) for m in missing]
2310 required = set(headsrevs) | known
2312 required = set(headsrevs) | known
2311 for rev in visit:
2313 for rev in visit:
2312 clrev = cl.changelogrevision(rev)
2314 clrev = cl.changelogrevision(rev)
2313 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2315 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2314 if depth is not None:
2316 if depth is not None:
2315 curdepth = revdepth[rev]
2317 curdepth = revdepth[rev]
2316 for p in ps:
2318 for p in ps:
2317 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2319 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2318 needed = False
2320 needed = False
2319 shallow_enough = depth is None or revdepth[rev] <= depth
2321 shallow_enough = depth is None or revdepth[rev] <= depth
2320 if shallow_enough:
2322 if shallow_enough:
2321 curmf = mfl[clrev.manifest].read()
2323 curmf = mfl[clrev.manifest].read()
2322 if ps:
2324 if ps:
2323 # We choose to not trust the changed files list in
2325 # We choose to not trust the changed files list in
2324 # changesets because it's not always correct. TODO: could
2326 # changesets because it's not always correct. TODO: could
2325 # we trust it for the non-merge case?
2327 # we trust it for the non-merge case?
2326 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2328 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2327 needed = bool(curmf.diff(p1mf, match))
2329 needed = bool(curmf.diff(p1mf, match))
2328 if not needed and len(ps) > 1:
2330 if not needed and len(ps) > 1:
2329 # For merge changes, the list of changed files is not
2331 # For merge changes, the list of changed files is not
2330 # helpful, since we need to emit the merge if a file
2332 # helpful, since we need to emit the merge if a file
2331 # in the narrow spec has changed on either side of the
2333 # in the narrow spec has changed on either side of the
2332 # merge. As a result, we do a manifest diff to check.
2334 # merge. As a result, we do a manifest diff to check.
2333 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2335 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2334 needed = bool(curmf.diff(p2mf, match))
2336 needed = bool(curmf.diff(p2mf, match))
2335 else:
2337 else:
2336 # For a root node, we need to include the node if any
2338 # For a root node, we need to include the node if any
2337 # files in the node match the narrowspec.
2339 # files in the node match the narrowspec.
2338 needed = any(curmf.walk(match))
2340 needed = any(curmf.walk(match))
2339
2341
2340 if needed:
2342 if needed:
2341 for head in ellipsisheads[rev]:
2343 for head in ellipsisheads[rev]:
2342 addroot(head, rev)
2344 addroot(head, rev)
2343 for p in ps:
2345 for p in ps:
2344 required.add(p)
2346 required.add(p)
2345 relevant_nodes.add(cl.node(rev))
2347 relevant_nodes.add(cl.node(rev))
2346 else:
2348 else:
2347 if not ps:
2349 if not ps:
2348 ps = [nullrev]
2350 ps = [nullrev]
2349 if rev in required:
2351 if rev in required:
2350 for head in ellipsisheads[rev]:
2352 for head in ellipsisheads[rev]:
2351 addroot(head, rev)
2353 addroot(head, rev)
2352 for p in ps:
2354 for p in ps:
2353 ellipsisheads[p].add(rev)
2355 ellipsisheads[p].add(rev)
2354 else:
2356 else:
2355 for p in ps:
2357 for p in ps:
2356 ellipsisheads[p] |= ellipsisheads[rev]
2358 ellipsisheads[p] |= ellipsisheads[rev]
2357
2359
2358 # add common changesets as roots of their reachable ellipsis heads
2360 # add common changesets as roots of their reachable ellipsis heads
2359 for c in commonrevs:
2361 for c in commonrevs:
2360 for head in ellipsisheads[c]:
2362 for head in ellipsisheads[c]:
2361 addroot(head, c)
2363 addroot(head, c)
2362 return visitnodes, relevant_nodes, ellipsisroots
2364 return visitnodes, relevant_nodes, ellipsisroots
2363
2365
2364
2366
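A hedged usage sketch of _computeellipsis for a full-clone shape; 'narrowmatch' is an assumed narrowspec matcher, and nullid is already imported by this module:

visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
    repo,
    common=[nullid],      # all ancestors of heads are missing on the client
    heads=repo.heads(),
    known=set(),          # no revisions already required by the client
    match=narrowmatch,
    depth=None,           # no shallowness limit
)
# visitnodes is what gets sent; anything not in relevant_nodes is emitted
# as an ellipsis node whose parents are taken from ellipsisroots.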
2365 def caps20to10(repo, role):
2367 def caps20to10(repo, role):
2366 """return a set with appropriate options to use bundle20 during getbundle"""
2368 """return a set with appropriate options to use bundle20 during getbundle"""
2367 caps = {b'HG20'}
2369 caps = {b'HG20'}
2368 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2369 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2371 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2370 return caps
2372 return caps
2371
2373
2372
2374
2373 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2375 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2374 getbundle2partsorder = []
2376 getbundle2partsorder = []
2375
2377
2376 # Mapping between step name and function
2378 # Mapping between step name and function
2377 #
2379 #
2378 # This exists to help extensions wrap steps if necessary
2380 # This exists to help extensions wrap steps if necessary
2379 getbundle2partsmapping = {}
2381 getbundle2partsmapping = {}
2380
2382
2381
2383
2382 def getbundle2partsgenerator(stepname, idx=None):
2384 def getbundle2partsgenerator(stepname, idx=None):
2383 """decorator for function generating bundle2 part for getbundle
2385 """decorator for function generating bundle2 part for getbundle
2384
2386
2385 The function is added to the step -> function mapping and appended to the
2387 The function is added to the step -> function mapping and appended to the
2386 list of steps. Beware that decorated functions will be added in order
2388 list of steps. Beware that decorated functions will be added in order
2387 (this may matter).
2389 (this may matter).
2388
2390
2389 You can only use this decorator for new steps; if you want to wrap a step
2391 You can only use this decorator for new steps; if you want to wrap a step
2390 from an extension, modify the getbundle2partsmapping dictionary directly."""
2392 from an extension, modify the getbundle2partsmapping dictionary directly."""
2391
2393
2392 def dec(func):
2394 def dec(func):
2393 assert stepname not in getbundle2partsmapping
2395 assert stepname not in getbundle2partsmapping
2394 getbundle2partsmapping[stepname] = func
2396 getbundle2partsmapping[stepname] = func
2395 if idx is None:
2397 if idx is None:
2396 getbundle2partsorder.append(stepname)
2398 getbundle2partsorder.append(stepname)
2397 else:
2399 else:
2398 getbundle2partsorder.insert(idx, stepname)
2400 getbundle2partsorder.insert(idx, stepname)
2399 return func
2401 return func
2400
2402
2401 return dec
2403 return dec
2402
2404
2403
2405
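A minimal sketch of the "wrap an existing step" case the docstring mentions, assuming an extension's uisetup entry point and the b'changegroup' step registered further below:

from mercurial import exchange

def _wrappedchangegroup(orig, bundler, repo, source, **kwargs):
    # hypothetical extra work before/after the original part generator
    return orig(bundler, repo, source, **kwargs)

def uisetup(ui):
    orig = exchange.getbundle2partsmapping[b'changegroup']
    exchange.getbundle2partsmapping[b'changegroup'] = (
        lambda *a, **kw: _wrappedchangegroup(orig, *a, **kw)
    )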
2404 def bundle2requested(bundlecaps):
2406 def bundle2requested(bundlecaps):
2405 if bundlecaps is not None:
2407 if bundlecaps is not None:
2406 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2408 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2407 return False
2409 return False
2408
2410
2409
2411
2410 def getbundlechunks(
2412 def getbundlechunks(
2411 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2413 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2412 ):
2414 ):
2413 """Return chunks constituting a bundle's raw data.
2415 """Return chunks constituting a bundle's raw data.
2414
2416
2415 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2417 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2416 passed.
2418 passed.
2417
2419
2418 Returns a 2-tuple of a dict with metadata about the generated bundle
2420 Returns a 2-tuple of a dict with metadata about the generated bundle
2419 and an iterator over raw chunks (of varying sizes).
2421 and an iterator over raw chunks (of varying sizes).
2420 """
2422 """
2421 kwargs = pycompat.byteskwargs(kwargs)
2423 kwargs = pycompat.byteskwargs(kwargs)
2422 info = {}
2424 info = {}
2423 usebundle2 = bundle2requested(bundlecaps)
2425 usebundle2 = bundle2requested(bundlecaps)
2424 # bundle10 case
2426 # bundle10 case
2425 if not usebundle2:
2427 if not usebundle2:
2426 if bundlecaps and not kwargs.get(b'cg', True):
2428 if bundlecaps and not kwargs.get(b'cg', True):
2427 raise ValueError(
2429 raise ValueError(
2428 _(b'request for bundle10 must include changegroup')
2430 _(b'request for bundle10 must include changegroup')
2429 )
2431 )
2430
2432
2431 if kwargs:
2433 if kwargs:
2432 raise ValueError(
2434 raise ValueError(
2433 _(b'unsupported getbundle arguments: %s')
2435 _(b'unsupported getbundle arguments: %s')
2434 % b', '.join(sorted(kwargs.keys()))
2436 % b', '.join(sorted(kwargs.keys()))
2435 )
2437 )
2436 outgoing = _computeoutgoing(repo, heads, common)
2438 outgoing = _computeoutgoing(repo, heads, common)
2437 info[b'bundleversion'] = 1
2439 info[b'bundleversion'] = 1
2438 return (
2440 return (
2439 info,
2441 info,
2440 changegroup.makestream(
2442 changegroup.makestream(
2441 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2443 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2442 ),
2444 ),
2443 )
2445 )
2444
2446
2445 # bundle20 case
2447 # bundle20 case
2446 info[b'bundleversion'] = 2
2448 info[b'bundleversion'] = 2
2447 b2caps = {}
2449 b2caps = {}
2448 for bcaps in bundlecaps:
2450 for bcaps in bundlecaps:
2449 if bcaps.startswith(b'bundle2='):
2451 if bcaps.startswith(b'bundle2='):
2450 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2452 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2451 b2caps.update(bundle2.decodecaps(blob))
2453 b2caps.update(bundle2.decodecaps(blob))
2452 bundler = bundle2.bundle20(repo.ui, b2caps)
2454 bundler = bundle2.bundle20(repo.ui, b2caps)
2453
2455
2454 kwargs[b'heads'] = heads
2456 kwargs[b'heads'] = heads
2455 kwargs[b'common'] = common
2457 kwargs[b'common'] = common
2456
2458
2457 for name in getbundle2partsorder:
2459 for name in getbundle2partsorder:
2458 func = getbundle2partsmapping[name]
2460 func = getbundle2partsmapping[name]
2459 func(
2461 func(
2460 bundler,
2462 bundler,
2461 repo,
2463 repo,
2462 source,
2464 source,
2463 bundlecaps=bundlecaps,
2465 bundlecaps=bundlecaps,
2464 b2caps=b2caps,
2466 b2caps=b2caps,
2465 **pycompat.strkwargs(kwargs)
2467 **pycompat.strkwargs(kwargs)
2466 )
2468 )
2467
2469
2468 info[b'prefercompressed'] = bundler.prefercompressed
2470 info[b'prefercompressed'] = bundler.prefercompressed
2469
2471
2470 return info, bundler.getchunks()
2472 return info, bundler.getchunks()
2471
2473
2472
2474
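A hedged caller-side sketch of getbundlechunks, dumping the generated stream to a file; the output path and the b'client' role spelling are assumptions:

caps = caps20to10(repo, role=b'client')   # advertise bundle2 support
info, chunks = getbundlechunks(
    repo, b'pull', heads=None, common=None, bundlecaps=caps, cg=True
)
assert info[b'bundleversion'] == 2
with open('dump.hg', 'wb') as fh:
    for chunk in chunks:
        fh.write(chunk)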
2473 @getbundle2partsgenerator(b'stream2')
2475 @getbundle2partsgenerator(b'stream2')
2474 def _getbundlestream2(bundler, repo, *args, **kwargs):
2476 def _getbundlestream2(bundler, repo, *args, **kwargs):
2475 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2477 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2476
2478
2477
2479
2478 @getbundle2partsgenerator(b'changegroup')
2480 @getbundle2partsgenerator(b'changegroup')
2479 def _getbundlechangegrouppart(
2481 def _getbundlechangegrouppart(
2480 bundler,
2482 bundler,
2481 repo,
2483 repo,
2482 source,
2484 source,
2483 bundlecaps=None,
2485 bundlecaps=None,
2484 b2caps=None,
2486 b2caps=None,
2485 heads=None,
2487 heads=None,
2486 common=None,
2488 common=None,
2487 **kwargs
2489 **kwargs
2488 ):
2490 ):
2489 """add a changegroup part to the requested bundle"""
2491 """add a changegroup part to the requested bundle"""
2490 if not kwargs.get('cg', True) or not b2caps:
2492 if not kwargs.get('cg', True) or not b2caps:
2491 return
2493 return
2492
2494
2493 version = b'01'
2495 version = b'01'
2494 cgversions = b2caps.get(b'changegroup')
2496 cgversions = b2caps.get(b'changegroup')
2495 if cgversions: # 3.1 and 3.2 ship with an empty value
2497 if cgversions: # 3.1 and 3.2 ship with an empty value
2496 cgversions = [
2498 cgversions = [
2497 v
2499 v
2498 for v in cgversions
2500 for v in cgversions
2499 if v in changegroup.supportedoutgoingversions(repo)
2501 if v in changegroup.supportedoutgoingversions(repo)
2500 ]
2502 ]
2501 if not cgversions:
2503 if not cgversions:
2502 raise error.Abort(_(b'no common changegroup version'))
2504 raise error.Abort(_(b'no common changegroup version'))
2503 version = max(cgversions)
2505 version = max(cgversions)
2504
2506
2505 outgoing = _computeoutgoing(repo, heads, common)
2507 outgoing = _computeoutgoing(repo, heads, common)
2506 if not outgoing.missing:
2508 if not outgoing.missing:
2507 return
2509 return
2508
2510
2509 if kwargs.get('narrow', False):
2511 if kwargs.get('narrow', False):
2510 include = sorted(filter(bool, kwargs.get('includepats', [])))
2512 include = sorted(filter(bool, kwargs.get('includepats', [])))
2511 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2513 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2512 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2514 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2513 else:
2515 else:
2514 matcher = None
2516 matcher = None
2515
2517
2516 cgstream = changegroup.makestream(
2518 cgstream = changegroup.makestream(
2517 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2519 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2518 )
2520 )
2519
2521
2520 part = bundler.newpart(b'changegroup', data=cgstream)
2522 part = bundler.newpart(b'changegroup', data=cgstream)
2521 if cgversions:
2523 if cgversions:
2522 part.addparam(b'version', version)
2524 part.addparam(b'version', version)
2523
2525
2524 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2526 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2525
2527
2526 if b'treemanifest' in repo.requirements:
2528 if b'treemanifest' in repo.requirements:
2527 part.addparam(b'treemanifest', b'1')
2529 part.addparam(b'treemanifest', b'1')
2528
2530
2529 if b'exp-sidedata-flag' in repo.requirements:
2531 if b'exp-sidedata-flag' in repo.requirements:
2530 part.addparam(b'exp-sidedata', b'1')
2532 part.addparam(b'exp-sidedata', b'1')
2531
2533
2532 if (
2534 if (
2533 kwargs.get('narrow', False)
2535 kwargs.get('narrow', False)
2534 and kwargs.get('narrow_acl', False)
2536 and kwargs.get('narrow_acl', False)
2535 and (include or exclude)
2537 and (include or exclude)
2536 ):
2538 ):
2537 # this is mandatory because otherwise ACL clients won't work
2539 # this is mandatory because otherwise ACL clients won't work
2538 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2540 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2539 narrowspecpart.data = b'%s\0%s' % (
2541 narrowspecpart.data = b'%s\0%s' % (
2540 b'\n'.join(include),
2542 b'\n'.join(include),
2541 b'\n'.join(exclude),
2543 b'\n'.join(exclude),
2542 )
2544 )
2543
2545
2544
2546
2545 @getbundle2partsgenerator(b'bookmarks')
2547 @getbundle2partsgenerator(b'bookmarks')
2546 def _getbundlebookmarkpart(
2548 def _getbundlebookmarkpart(
2547 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2549 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2548 ):
2550 ):
2549 """add a bookmark part to the requested bundle"""
2551 """add a bookmark part to the requested bundle"""
2550 if not kwargs.get('bookmarks', False):
2552 if not kwargs.get('bookmarks', False):
2551 return
2553 return
2552 if not b2caps or b'bookmarks' not in b2caps:
2554 if not b2caps or b'bookmarks' not in b2caps:
2553 raise error.Abort(_(b'no common bookmarks exchange method'))
2555 raise error.Abort(_(b'no common bookmarks exchange method'))
2554 books = bookmod.listbinbookmarks(repo)
2556 books = bookmod.listbinbookmarks(repo)
2555 data = bookmod.binaryencode(books)
2557 data = bookmod.binaryencode(books)
2556 if data:
2558 if data:
2557 bundler.newpart(b'bookmarks', data=data)
2559 bundler.newpart(b'bookmarks', data=data)
2558
2560
2559
2561
2560 @getbundle2partsgenerator(b'listkeys')
2562 @getbundle2partsgenerator(b'listkeys')
2561 def _getbundlelistkeysparts(
2563 def _getbundlelistkeysparts(
2562 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2564 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2563 ):
2565 ):
2564 """add parts containing listkeys namespaces to the requested bundle"""
2566 """add parts containing listkeys namespaces to the requested bundle"""
2565 listkeys = kwargs.get('listkeys', ())
2567 listkeys = kwargs.get('listkeys', ())
2566 for namespace in listkeys:
2568 for namespace in listkeys:
2567 part = bundler.newpart(b'listkeys')
2569 part = bundler.newpart(b'listkeys')
2568 part.addparam(b'namespace', namespace)
2570 part.addparam(b'namespace', namespace)
2569 keys = repo.listkeys(namespace).items()
2571 keys = repo.listkeys(namespace).items()
2570 part.data = pushkey.encodekeys(keys)
2572 part.data = pushkey.encodekeys(keys)
2571
2573
2572
2574
2573 @getbundle2partsgenerator(b'obsmarkers')
2575 @getbundle2partsgenerator(b'obsmarkers')
2574 def _getbundleobsmarkerpart(
2576 def _getbundleobsmarkerpart(
2575 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2577 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2576 ):
2578 ):
2577 """add an obsolescence markers part to the requested bundle"""
2579 """add an obsolescence markers part to the requested bundle"""
2578 if kwargs.get('obsmarkers', False):
2580 if kwargs.get('obsmarkers', False):
2579 if heads is None:
2581 if heads is None:
2580 heads = repo.heads()
2582 heads = repo.heads()
2581 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2583 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2582 markers = repo.obsstore.relevantmarkers(subset)
2584 markers = repo.obsstore.relevantmarkers(subset)
2583 markers = obsutil.sortedmarkers(markers)
2585 markers = obsutil.sortedmarkers(markers)
2584 bundle2.buildobsmarkerspart(bundler, markers)
2586 bundle2.buildobsmarkerspart(bundler, markers)
2585
2587
2586
2588
2587 @getbundle2partsgenerator(b'phases')
2589 @getbundle2partsgenerator(b'phases')
2588 def _getbundlephasespart(
2590 def _getbundlephasespart(
2589 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2591 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2590 ):
2592 ):
2591 """add phase heads part to the requested bundle"""
2593 """add phase heads part to the requested bundle"""
2592 if kwargs.get('phases', False):
2594 if kwargs.get('phases', False):
2593 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2595 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2594 raise error.Abort(_(b'no common phases exchange method'))
2596 raise error.Abort(_(b'no common phases exchange method'))
2595 if heads is None:
2597 if heads is None:
2596 heads = repo.heads()
2598 heads = repo.heads()
2597
2599
2598 headsbyphase = collections.defaultdict(set)
2600 headsbyphase = collections.defaultdict(set)
2599 if repo.publishing():
2601 if repo.publishing():
2600 headsbyphase[phases.public] = heads
2602 headsbyphase[phases.public] = heads
2601 else:
2603 else:
2602 # find the appropriate heads to move
2604 # find the appropriate heads to move
2603
2605
2604 phase = repo._phasecache.phase
2606 phase = repo._phasecache.phase
2605 node = repo.changelog.node
2607 node = repo.changelog.node
2606 rev = repo.changelog.rev
2608 rev = repo.changelog.rev
2607 for h in heads:
2609 for h in heads:
2608 headsbyphase[phase(repo, rev(h))].add(h)
2610 headsbyphase[phase(repo, rev(h))].add(h)
2609 seenphases = list(headsbyphase.keys())
2611 seenphases = list(headsbyphase.keys())
2610
2612
2611 # We do not handle anything but public and draft phases for now
2613 # We do not handle anything but public and draft phases for now
2612 if seenphases:
2614 if seenphases:
2613 assert max(seenphases) <= phases.draft
2615 assert max(seenphases) <= phases.draft
2614
2616
2615 # if client is pulling non-public changesets, we need to find
2617 # if client is pulling non-public changesets, we need to find
2616 # intermediate public heads.
2618 # intermediate public heads.
2617 draftheads = headsbyphase.get(phases.draft, set())
2619 draftheads = headsbyphase.get(phases.draft, set())
2618 if draftheads:
2620 if draftheads:
2619 publicheads = headsbyphase.get(phases.public, set())
2621 publicheads = headsbyphase.get(phases.public, set())
2620
2622
2621 revset = b'heads(only(%ln, %ln) and public())'
2623 revset = b'heads(only(%ln, %ln) and public())'
2622 extraheads = repo.revs(revset, draftheads, publicheads)
2624 extraheads = repo.revs(revset, draftheads, publicheads)
2623 for r in extraheads:
2625 for r in extraheads:
2624 headsbyphase[phases.public].add(node(r))
2626 headsbyphase[phases.public].add(node(r))
2625
2627
2626 # transform data into the format used by the encoding function
2628 # transform data into the format used by the encoding function
2627 phasemapping = []
2629 phasemapping = []
2628 for phase in phases.allphases:
2630 for phase in phases.allphases:
2629 phasemapping.append(sorted(headsbyphase[phase]))
2631 phasemapping.append(sorted(headsbyphase[phase]))
2630
2632
2631 # generate the actual part
2633 # generate the actual part
2632 phasedata = phases.binaryencode(phasemapping)
2634 phasedata = phases.binaryencode(phasemapping)
2633 bundler.newpart(b'phase-heads', data=phasedata)
2635 bundler.newpart(b'phase-heads', data=phasedata)
2634
2636
2635
2637
2636 @getbundle2partsgenerator(b'hgtagsfnodes')
2638 @getbundle2partsgenerator(b'hgtagsfnodes')
2637 def _getbundletagsfnodes(
2639 def _getbundletagsfnodes(
2638 bundler,
2640 bundler,
2639 repo,
2641 repo,
2640 source,
2642 source,
2641 bundlecaps=None,
2643 bundlecaps=None,
2642 b2caps=None,
2644 b2caps=None,
2643 heads=None,
2645 heads=None,
2644 common=None,
2646 common=None,
2645 **kwargs
2647 **kwargs
2646 ):
2648 ):
2647 """Transfer the .hgtags filenodes mapping.
2649 """Transfer the .hgtags filenodes mapping.
2648
2650
2649 Only values for heads in this bundle will be transferred.
2651 Only values for heads in this bundle will be transferred.
2650
2652
2651 The part data consists of pairs of a 20-byte changeset node and the raw
2653 The part data consists of pairs of a 20-byte changeset node and the raw
2652 .hgtags filenode value.
2654 .hgtags filenode value.
2653 """
2655 """
2654 # Don't send unless:
2656 # Don't send unless:
2655 # - changesets are being exchanged,
2657 # - changesets are being exchanged,
2656 # - the client supports it.
2658 # - the client supports it.
2657 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2659 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2658 return
2660 return
2659
2661
2660 outgoing = _computeoutgoing(repo, heads, common)
2662 outgoing = _computeoutgoing(repo, heads, common)
2661 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2663 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2662
2664
2663
2665
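Given the fixed 20-byte pair layout described in the docstring above, a hedged consumer-side sketch for walking such a payload:

def iter_tagsfnodes(data):
    # yields (changeset node, .hgtags filenode) pairs, 40 bytes per record;
    # a truncated trailing record is silently ignored
    for offset in range(0, len(data) - 39, 40):
        record = data[offset:offset + 40]
        yield record[:20], record[20:40]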
2664 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2666 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2665 def _getbundlerevbranchcache(
2667 def _getbundlerevbranchcache(
2666 bundler,
2668 bundler,
2667 repo,
2669 repo,
2668 source,
2670 source,
2669 bundlecaps=None,
2671 bundlecaps=None,
2670 b2caps=None,
2672 b2caps=None,
2671 heads=None,
2673 heads=None,
2672 common=None,
2674 common=None,
2673 **kwargs
2675 **kwargs
2674 ):
2676 ):
2675 """Transfer the rev-branch-cache mapping
2677 """Transfer the rev-branch-cache mapping
2676
2678
2677 The payload is a series of data related to each branch
2679 The payload is a series of data related to each branch
2678
2680
2679 1) branch name length
2681 1) branch name length
2680 2) number of open heads
2682 2) number of open heads
2681 3) number of closed heads
2683 3) number of closed heads
2682 4) open heads nodes
2684 4) open heads nodes
2683 5) closed heads nodes
2685 5) closed heads nodes
2684 """
2686 """
2685 # Don't send unless:
2687 # Don't send unless:
2686 # - changesets are being exchanged,
2688 # - changesets are being exchanged,
2687 # - the client supports it.
2689 # - the client supports it.
2688 # - narrow bundle isn't in play (not currently compatible).
2690 # - narrow bundle isn't in play (not currently compatible).
2689 if (
2691 if (
2690 not kwargs.get('cg', True)
2692 not kwargs.get('cg', True)
2691 or not b2caps
2693 or not b2caps
2692 or b'rev-branch-cache' not in b2caps
2694 or b'rev-branch-cache' not in b2caps
2693 or kwargs.get('narrow', False)
2695 or kwargs.get('narrow', False)
2694 or repo.ui.has_section(_NARROWACL_SECTION)
2696 or repo.ui.has_section(_NARROWACL_SECTION)
2695 ):
2697 ):
2696 return
2698 return
2697
2699
2698 outgoing = _computeoutgoing(repo, heads, common)
2700 outgoing = _computeoutgoing(repo, heads, common)
2699 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2701 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2700
2702
2701
2703
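A hedged sketch of one branch record in that payload; the three counters are assumed to be big-endian 32-bit integers and the heads 20-byte binary nodes (the exact widths are defined in bundle2.py, not in this hunk):

import struct

def pack_branch_record(name, openheads, closedheads):
    # assumed layout: >III header (name length, open count, closed count),
    # then the branch name, then the open and closed head nodes
    header = struct.pack('>III', len(name), len(openheads), len(closedheads))
    return header + name + b''.join(openheads) + b''.join(closedheads)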
2702 def check_heads(repo, their_heads, context):
2704 def check_heads(repo, their_heads, context):
2703 """check if the heads of a repo have been modified
2705 """check if the heads of a repo have been modified
2704
2706
2705 Used by peer for unbundling.
2707 Used by peer for unbundling.
2706 """
2708 """
2707 heads = repo.heads()
2709 heads = repo.heads()
2708 heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
2710 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2709 if not (
2711 if not (
2710 their_heads == [b'force']
2712 their_heads == [b'force']
2711 or their_heads == heads
2713 or their_heads == heads
2712 or their_heads == [b'hashed', heads_hash]
2714 or their_heads == [b'hashed', heads_hash]
2713 ):
2715 ):
2714 # someone else committed/pushed/unbundled while we
2716 # someone else committed/pushed/unbundled while we
2715 # were transferring data
2717 # were transferring data
2716 raise error.PushRaced(
2718 raise error.PushRaced(
2717 b'repository changed while %s - please try again' % context
2719 b'repository changed while %s - please try again' % context
2718 )
2720 )
2719
2721
2720
2722
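The [b'hashed', ...] form accepted by check_heads() is simply the sha1 of the sorted head nodes; a sketch of the matching client-side computation, using the same hashutil wrapper this change migrates to:

def hashed_heads(observed_heads):
    # observed_heads: list of 20-byte head nodes previously seen on the remote
    digest = hashutil.sha1(b''.join(sorted(observed_heads))).digest()
    return [b'hashed', digest]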
2721 def unbundle(repo, cg, heads, source, url):
2723 def unbundle(repo, cg, heads, source, url):
2722 """Apply a bundle to a repo.
2724 """Apply a bundle to a repo.
2723
2725
2724 This function makes sure the repo is locked during the application and has a
2726 This function makes sure the repo is locked during the application and has a
2725 mechanism to check that no push race occurred between the creation of the
2727 mechanism to check that no push race occurred between the creation of the
2726 bundle and its application.
2728 bundle and its application.
2727
2729
2728 If the push was raced, a PushRaced exception is raised."""
2730 If the push was raced, a PushRaced exception is raised."""
2729 r = 0
2731 r = 0
2730 # need a transaction when processing a bundle2 stream
2732 # need a transaction when processing a bundle2 stream
2731 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2733 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2732 lockandtr = [None, None, None]
2734 lockandtr = [None, None, None]
2733 recordout = None
2735 recordout = None
2734 # quick fix for output mismatch with bundle2 in 3.4
2736 # quick fix for output mismatch with bundle2 in 3.4
2735 captureoutput = repo.ui.configbool(
2737 captureoutput = repo.ui.configbool(
2736 b'experimental', b'bundle2-output-capture'
2738 b'experimental', b'bundle2-output-capture'
2737 )
2739 )
2738 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2740 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2739 captureoutput = True
2741 captureoutput = True
2740 try:
2742 try:
2741 # note: outside bundle1, 'heads' is expected to be empty and this
2743 # note: outside bundle1, 'heads' is expected to be empty and this
2742 # 'check_heads' call will be a no-op
2744 # 'check_heads' call will be a no-op
2743 check_heads(repo, heads, b'uploading changes')
2745 check_heads(repo, heads, b'uploading changes')
2744 # push can proceed
2746 # push can proceed
2745 if not isinstance(cg, bundle2.unbundle20):
2747 if not isinstance(cg, bundle2.unbundle20):
2746 # legacy case: bundle1 (changegroup 01)
2748 # legacy case: bundle1 (changegroup 01)
2747 txnname = b"\n".join([source, util.hidepassword(url)])
2749 txnname = b"\n".join([source, util.hidepassword(url)])
2748 with repo.lock(), repo.transaction(txnname) as tr:
2750 with repo.lock(), repo.transaction(txnname) as tr:
2749 op = bundle2.applybundle(repo, cg, tr, source, url)
2751 op = bundle2.applybundle(repo, cg, tr, source, url)
2750 r = bundle2.combinechangegroupresults(op)
2752 r = bundle2.combinechangegroupresults(op)
2751 else:
2753 else:
2752 r = None
2754 r = None
2753 try:
2755 try:
2754
2756
2755 def gettransaction():
2757 def gettransaction():
2756 if not lockandtr[2]:
2758 if not lockandtr[2]:
2757 if not bookmod.bookmarksinstore(repo):
2759 if not bookmod.bookmarksinstore(repo):
2758 lockandtr[0] = repo.wlock()
2760 lockandtr[0] = repo.wlock()
2759 lockandtr[1] = repo.lock()
2761 lockandtr[1] = repo.lock()
2760 lockandtr[2] = repo.transaction(source)
2762 lockandtr[2] = repo.transaction(source)
2761 lockandtr[2].hookargs[b'source'] = source
2763 lockandtr[2].hookargs[b'source'] = source
2762 lockandtr[2].hookargs[b'url'] = url
2764 lockandtr[2].hookargs[b'url'] = url
2763 lockandtr[2].hookargs[b'bundle2'] = b'1'
2765 lockandtr[2].hookargs[b'bundle2'] = b'1'
2764 return lockandtr[2]
2766 return lockandtr[2]
2765
2767
2766 # Do greedy locking by default until we're satisfied with lazy
2768 # Do greedy locking by default until we're satisfied with lazy
2767 # locking.
2769 # locking.
2768 if not repo.ui.configbool(
2770 if not repo.ui.configbool(
2769 b'experimental', b'bundle2lazylocking'
2771 b'experimental', b'bundle2lazylocking'
2770 ):
2772 ):
2771 gettransaction()
2773 gettransaction()
2772
2774
2773 op = bundle2.bundleoperation(
2775 op = bundle2.bundleoperation(
2774 repo,
2776 repo,
2775 gettransaction,
2777 gettransaction,
2776 captureoutput=captureoutput,
2778 captureoutput=captureoutput,
2777 source=b'push',
2779 source=b'push',
2778 )
2780 )
2779 try:
2781 try:
2780 op = bundle2.processbundle(repo, cg, op=op)
2782 op = bundle2.processbundle(repo, cg, op=op)
2781 finally:
2783 finally:
2782 r = op.reply
2784 r = op.reply
2783 if captureoutput and r is not None:
2785 if captureoutput and r is not None:
2784 repo.ui.pushbuffer(error=True, subproc=True)
2786 repo.ui.pushbuffer(error=True, subproc=True)
2785
2787
2786 def recordout(output):
2788 def recordout(output):
2787 r.newpart(b'output', data=output, mandatory=False)
2789 r.newpart(b'output', data=output, mandatory=False)
2788
2790
2789 if lockandtr[2] is not None:
2791 if lockandtr[2] is not None:
2790 lockandtr[2].close()
2792 lockandtr[2].close()
2791 except BaseException as exc:
2793 except BaseException as exc:
2792 exc.duringunbundle2 = True
2794 exc.duringunbundle2 = True
2793 if captureoutput and r is not None:
2795 if captureoutput and r is not None:
2794 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2796 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2795
2797
2796 def recordout(output):
2798 def recordout(output):
2797 part = bundle2.bundlepart(
2799 part = bundle2.bundlepart(
2798 b'output', data=output, mandatory=False
2800 b'output', data=output, mandatory=False
2799 )
2801 )
2800 parts.append(part)
2802 parts.append(part)
2801
2803
2802 raise
2804 raise
2803 finally:
2805 finally:
2804 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2806 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2805 if recordout is not None:
2807 if recordout is not None:
2806 recordout(repo.ui.popbuffer())
2808 recordout(repo.ui.popbuffer())
2807 return r
2809 return r
2808
2810
2809
2811
2810 def _maybeapplyclonebundle(pullop):
2812 def _maybeapplyclonebundle(pullop):
2811 """Apply a clone bundle from a remote, if possible."""
2813 """Apply a clone bundle from a remote, if possible."""
2812
2814
2813 repo = pullop.repo
2815 repo = pullop.repo
2814 remote = pullop.remote
2816 remote = pullop.remote
2815
2817
2816 if not repo.ui.configbool(b'ui', b'clonebundles'):
2818 if not repo.ui.configbool(b'ui', b'clonebundles'):
2817 return
2819 return
2818
2820
2819 # Only run if local repo is empty.
2821 # Only run if local repo is empty.
2820 if len(repo):
2822 if len(repo):
2821 return
2823 return
2822
2824
2823 if pullop.heads:
2825 if pullop.heads:
2824 return
2826 return
2825
2827
2826 if not remote.capable(b'clonebundles'):
2828 if not remote.capable(b'clonebundles'):
2827 return
2829 return
2828
2830
2829 with remote.commandexecutor() as e:
2831 with remote.commandexecutor() as e:
2830 res = e.callcommand(b'clonebundles', {}).result()
2832 res = e.callcommand(b'clonebundles', {}).result()
2831
2833
2832 # If we call the wire protocol command, that's good enough to record the
2834 # If we call the wire protocol command, that's good enough to record the
2833 # attempt.
2835 # attempt.
2834 pullop.clonebundleattempted = True
2836 pullop.clonebundleattempted = True
2835
2837
2836 entries = parseclonebundlesmanifest(repo, res)
2838 entries = parseclonebundlesmanifest(repo, res)
2837 if not entries:
2839 if not entries:
2838 repo.ui.note(
2840 repo.ui.note(
2839 _(
2841 _(
2840 b'no clone bundles available on remote; '
2842 b'no clone bundles available on remote; '
2841 b'falling back to regular clone\n'
2843 b'falling back to regular clone\n'
2842 )
2844 )
2843 )
2845 )
2844 return
2846 return
2845
2847
2846 entries = filterclonebundleentries(
2848 entries = filterclonebundleentries(
2847 repo, entries, streamclonerequested=pullop.streamclonerequested
2849 repo, entries, streamclonerequested=pullop.streamclonerequested
2848 )
2850 )
2849
2851
2850 if not entries:
2852 if not entries:
2851 # There is a thundering herd concern here. However, if a server
2853 # There is a thundering herd concern here. However, if a server
2852 # operator doesn't advertise bundles appropriate for its clients,
2854 # operator doesn't advertise bundles appropriate for its clients,
2853 # they deserve what's coming. Furthermore, from a client's
2855 # they deserve what's coming. Furthermore, from a client's
2854 # perspective, no automatic fallback would mean not being able to
2856 # perspective, no automatic fallback would mean not being able to
2855 # clone!
2857 # clone!
2856 repo.ui.warn(
2858 repo.ui.warn(
2857 _(
2859 _(
2858 b'no compatible clone bundles available on server; '
2860 b'no compatible clone bundles available on server; '
2859 b'falling back to regular clone\n'
2861 b'falling back to regular clone\n'
2860 )
2862 )
2861 )
2863 )
2862 repo.ui.warn(
2864 repo.ui.warn(
2863 _(b'(you may want to report this to the server operator)\n')
2865 _(b'(you may want to report this to the server operator)\n')
2864 )
2866 )
2865 return
2867 return
2866
2868
2867 entries = sortclonebundleentries(repo.ui, entries)
2869 entries = sortclonebundleentries(repo.ui, entries)
2868
2870
2869 url = entries[0][b'URL']
2871 url = entries[0][b'URL']
2870 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2872 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2871 if trypullbundlefromurl(repo.ui, repo, url):
2873 if trypullbundlefromurl(repo.ui, repo, url):
2872 repo.ui.status(_(b'finished applying clone bundle\n'))
2874 repo.ui.status(_(b'finished applying clone bundle\n'))
2873 # Bundle failed.
2875 # Bundle failed.
2874 #
2876 #
2875 # We abort by default to avoid the thundering herd of
2877 # We abort by default to avoid the thundering herd of
2876 # clients flooding a server that was expecting expensive
2878 # clients flooding a server that was expecting expensive
2877 # clone load to be offloaded.
2879 # clone load to be offloaded.
2878 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2880 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2879 repo.ui.warn(_(b'falling back to normal clone\n'))
2881 repo.ui.warn(_(b'falling back to normal clone\n'))
2880 else:
2882 else:
2881 raise error.Abort(
2883 raise error.Abort(
2882 _(b'error applying bundle'),
2884 _(b'error applying bundle'),
2883 hint=_(
2885 hint=_(
2884 b'if this error persists, consider contacting '
2886 b'if this error persists, consider contacting '
2885 b'the server operator or disable clone '
2887 b'the server operator or disable clone '
2886 b'bundles via '
2888 b'bundles via '
2887 b'"--config ui.clonebundles=false"'
2889 b'"--config ui.clonebundles=false"'
2888 ),
2890 ),
2889 )
2891 )
2890
2892
2891
2893
2892 def parseclonebundlesmanifest(repo, s):
2894 def parseclonebundlesmanifest(repo, s):
2893 """Parses the raw text of a clone bundles manifest.
2895 """Parses the raw text of a clone bundles manifest.
2894
2896
2895 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2897 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2896 to the URL and other keys are the attributes for the entry.
2898 to the URL and other keys are the attributes for the entry.
2897 """
2899 """
2898 m = []
2900 m = []
2899 for line in s.splitlines():
2901 for line in s.splitlines():
2900 fields = line.split()
2902 fields = line.split()
2901 if not fields:
2903 if not fields:
2902 continue
2904 continue
2903 attrs = {b'URL': fields[0]}
2905 attrs = {b'URL': fields[0]}
2904 for rawattr in fields[1:]:
2906 for rawattr in fields[1:]:
2905 key, value = rawattr.split(b'=', 1)
2907 key, value = rawattr.split(b'=', 1)
2906 key = urlreq.unquote(key)
2908 key = urlreq.unquote(key)
2907 value = urlreq.unquote(value)
2909 value = urlreq.unquote(value)
2908 attrs[key] = value
2910 attrs[key] = value
2909
2911
2910 # Parse BUNDLESPEC into components. This makes client-side
2912 # Parse BUNDLESPEC into components. This makes client-side
2911 # preferences easier to specify since you can prefer a single
2913 # preferences easier to specify since you can prefer a single
2912 # component of the BUNDLESPEC.
2914 # component of the BUNDLESPEC.
2913 if key == b'BUNDLESPEC':
2915 if key == b'BUNDLESPEC':
2914 try:
2916 try:
2915 bundlespec = parsebundlespec(repo, value)
2917 bundlespec = parsebundlespec(repo, value)
2916 attrs[b'COMPRESSION'] = bundlespec.compression
2918 attrs[b'COMPRESSION'] = bundlespec.compression
2917 attrs[b'VERSION'] = bundlespec.version
2919 attrs[b'VERSION'] = bundlespec.version
2918 except error.InvalidBundleSpecification:
2920 except error.InvalidBundleSpecification:
2919 pass
2921 pass
2920 except error.UnsupportedBundleSpecification:
2922 except error.UnsupportedBundleSpecification:
2921 pass
2923 pass
2922
2924
2923 m.append(attrs)
2925 m.append(attrs)
2924
2926
2925 return m
2927 return m
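
A quick, hedged illustration of the manifest format this function consumes: each line is a URL followed by optional percent-encoded key=value attributes. The manifest text, URLs and bundlespec values below are invented, and the snippet is a simplified standalone re-implementation of the per-line parsing above; the real function additionally expands BUNDLESPEC into COMPRESSION/VERSION via parsebundlespec().

from urllib.parse import unquote_to_bytes

# Invented manifest text; real clients obtain this via the 'clonebundles'
# wire protocol command.
manifest = (
    b'https://example.com/full.hg BUNDLESPEC=zstd-v2 REQUIRESNI=true\n'
    b'https://example.com/stream.hg BUNDLESPEC=none-v2;stream=v2\n'
)

entries = []
for line in manifest.splitlines():
    fields = line.split()
    if not fields:
        continue
    attrs = {b'URL': fields[0]}
    for rawattr in fields[1:]:
        key, value = rawattr.split(b'=', 1)
        attrs[unquote_to_bytes(key)] = unquote_to_bytes(value)
    entries.append(attrs)

print(entries[0])
# {b'URL': b'https://example.com/full.hg', b'BUNDLESPEC': b'zstd-v2',
#  b'REQUIRESNI': b'true'}
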
2926
2928
2927
2929
2928 def isstreamclonespec(bundlespec):
2930 def isstreamclonespec(bundlespec):
2929 # Stream clone v1
2931 # Stream clone v1
2930 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2932 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2931 return True
2933 return True
2932
2934
2933 # Stream clone v2
2935 # Stream clone v2
2934 if (
2936 if (
2935 bundlespec.wirecompression == b'UN'
2937 bundlespec.wirecompression == b'UN'
2936 and bundlespec.wireversion == b'02'
2938 and bundlespec.wireversion == b'02'
2937 and bundlespec.contentopts.get(b'streamv2')
2939 and bundlespec.contentopts.get(b'streamv2')
2938 ):
2940 ):
2939 return True
2941 return True
2940
2942
2941 return False
2943 return False
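
A minimal sketch of what this predicate looks for, assuming isstreamclonespec as defined above is importable. A namedtuple stands in for the object parsebundlespec() returns; the real object exposes the same three attributes read above, and the specs here are constructed by hand purely for illustration.

import collections

Spec = collections.namedtuple('Spec', 'wirecompression wireversion contentopts')

v1 = Spec(b'UN', b's1', {})                    # stream clone v1
v2 = Spec(b'UN', b'02', {b'streamv2': True})   # stream clone v2
plain = Spec(b'GZ', b'02', {})                 # ordinary compressed bundle

for spec in (v1, v2, plain):
    print(isstreamclonespec(spec))
# expected output: True, True, False
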
2942
2944
2943
2945
2944 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2946 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2945 """Remove incompatible clone bundle manifest entries.
2947 """Remove incompatible clone bundle manifest entries.
2946
2948
2947 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2949 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2948 and returns a new list consisting of only the entries that this client
2950 and returns a new list consisting of only the entries that this client
2949 should be able to apply.
2951 should be able to apply.
2950
2952
2951 There is no guarantee we'll be able to apply all returned entries because
2953 There is no guarantee we'll be able to apply all returned entries because
2952 the metadata we use to filter on may be missing or wrong.
2954 the metadata we use to filter on may be missing or wrong.
2953 """
2955 """
2954 newentries = []
2956 newentries = []
2955 for entry in entries:
2957 for entry in entries:
2956 spec = entry.get(b'BUNDLESPEC')
2958 spec = entry.get(b'BUNDLESPEC')
2957 if spec:
2959 if spec:
2958 try:
2960 try:
2959 bundlespec = parsebundlespec(repo, spec, strict=True)
2961 bundlespec = parsebundlespec(repo, spec, strict=True)
2960
2962
2961 # If a stream clone was requested, filter out non-streamclone
2963 # If a stream clone was requested, filter out non-streamclone
2962 # entries.
2964 # entries.
2963 if streamclonerequested and not isstreamclonespec(bundlespec):
2965 if streamclonerequested and not isstreamclonespec(bundlespec):
2964 repo.ui.debug(
2966 repo.ui.debug(
2965 b'filtering %s because not a stream clone\n'
2967 b'filtering %s because not a stream clone\n'
2966 % entry[b'URL']
2968 % entry[b'URL']
2967 )
2969 )
2968 continue
2970 continue
2969
2971
2970 except error.InvalidBundleSpecification as e:
2972 except error.InvalidBundleSpecification as e:
2971 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
2973 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
2972 continue
2974 continue
2973 except error.UnsupportedBundleSpecification as e:
2975 except error.UnsupportedBundleSpecification as e:
2974 repo.ui.debug(
2976 repo.ui.debug(
2975 b'filtering %s because unsupported bundle '
2977 b'filtering %s because unsupported bundle '
2976 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
2978 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
2977 )
2979 )
2978 continue
2980 continue
2979 # If we don't have a spec and requested a stream clone, we don't know
2981 # If we don't have a spec and requested a stream clone, we don't know
2980 # what the entry is so don't attempt to apply it.
2982 # what the entry is so don't attempt to apply it.
2981 elif streamclonerequested:
2983 elif streamclonerequested:
2982 repo.ui.debug(
2984 repo.ui.debug(
2983 b'filtering %s because cannot determine if a stream '
2985 b'filtering %s because cannot determine if a stream '
2984 b'clone bundle\n' % entry[b'URL']
2986 b'clone bundle\n' % entry[b'URL']
2985 )
2987 )
2986 continue
2988 continue
2987
2989
2988 if b'REQUIRESNI' in entry and not sslutil.hassni:
2990 if b'REQUIRESNI' in entry and not sslutil.hassni:
2989 repo.ui.debug(
2991 repo.ui.debug(
2990 b'filtering %s because SNI not supported\n' % entry[b'URL']
2992 b'filtering %s because SNI not supported\n' % entry[b'URL']
2991 )
2993 )
2992 continue
2994 continue
2993
2995
2994 newentries.append(entry)
2996 newentries.append(entry)
2995
2997
2996 return newentries
2998 return newentries
2997
2999
2998
3000
2999 class clonebundleentry(object):
3001 class clonebundleentry(object):
3000 """Represents an item in a clone bundles manifest.
3002 """Represents an item in a clone bundles manifest.
3001
3003
3002 This rich class is needed to support sorting since sorted() in Python 3
3004 This rich class is needed to support sorting since sorted() in Python 3
3003 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3005 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3004 won't work.
3006 won't work.
3005 """
3007 """
3006
3008
3007 def __init__(self, value, prefers):
3009 def __init__(self, value, prefers):
3008 self.value = value
3010 self.value = value
3009 self.prefers = prefers
3011 self.prefers = prefers
3010
3012
3011 def _cmp(self, other):
3013 def _cmp(self, other):
3012 for prefkey, prefvalue in self.prefers:
3014 for prefkey, prefvalue in self.prefers:
3013 avalue = self.value.get(prefkey)
3015 avalue = self.value.get(prefkey)
3014 bvalue = other.value.get(prefkey)
3016 bvalue = other.value.get(prefkey)
3015
3017
3016 # Special case for b missing attribute and a matches exactly.
3018 # Special case for b missing attribute and a matches exactly.
3017 if avalue is not None and bvalue is None and avalue == prefvalue:
3019 if avalue is not None and bvalue is None and avalue == prefvalue:
3018 return -1
3020 return -1
3019
3021
3020 # Special case for a missing attribute and b matches exactly.
3022 # Special case for a missing attribute and b matches exactly.
3021 if bvalue is not None and avalue is None and bvalue == prefvalue:
3023 if bvalue is not None and avalue is None and bvalue == prefvalue:
3022 return 1
3024 return 1
3023
3025
3024 # We can't compare unless attribute present on both.
3026 # We can't compare unless attribute present on both.
3025 if avalue is None or bvalue is None:
3027 if avalue is None or bvalue is None:
3026 continue
3028 continue
3027
3029
3028 # Same values should fall back to next attribute.
3030 # Same values should fall back to next attribute.
3029 if avalue == bvalue:
3031 if avalue == bvalue:
3030 continue
3032 continue
3031
3033
3032 # Exact matches come first.
3034 # Exact matches come first.
3033 if avalue == prefvalue:
3035 if avalue == prefvalue:
3034 return -1
3036 return -1
3035 if bvalue == prefvalue:
3037 if bvalue == prefvalue:
3036 return 1
3038 return 1
3037
3039
3038 # Fall back to next attribute.
3040 # Fall back to next attribute.
3039 continue
3041 continue
3040
3042
3041 # If we got here we couldn't sort by attributes and prefers. Fall
3043 # If we got here we couldn't sort by attributes and prefers. Fall
3042 # back to index order.
3044 # back to index order.
3043 return 0
3045 return 0
3044
3046
3045 def __lt__(self, other):
3047 def __lt__(self, other):
3046 return self._cmp(other) < 0
3048 return self._cmp(other) < 0
3047
3049
3048 def __gt__(self, other):
3050 def __gt__(self, other):
3049 return self._cmp(other) > 0
3051 return self._cmp(other) > 0
3050
3052
3051 def __eq__(self, other):
3053 def __eq__(self, other):
3052 return self._cmp(other) == 0
3054 return self._cmp(other) == 0
3053
3055
3054 def __le__(self, other):
3056 def __le__(self, other):
3055 return self._cmp(other) <= 0
3057 return self._cmp(other) <= 0
3056
3058
3057 def __ge__(self, other):
3059 def __ge__(self, other):
3058 return self._cmp(other) >= 0
3060 return self._cmp(other) >= 0
3059
3061
3060 def __ne__(self, other):
3062 def __ne__(self, other):
3061 return self._cmp(other) != 0
3063 return self._cmp(other) != 0
3062
3064
3063
3065
3064 def sortclonebundleentries(ui, entries):
3066 def sortclonebundleentries(ui, entries):
3065 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3067 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3066 if not prefers:
3068 if not prefers:
3067 return list(entries)
3069 return list(entries)
3068
3070
3069 prefers = [p.split(b'=', 1) for p in prefers]
3071 prefers = [p.split(b'=', 1) for p in prefers]
3070
3072
3071 items = sorted(clonebundleentry(v, prefers) for v in entries)
3073 items = sorted(clonebundleentry(v, prefers) for v in entries)
3072 return [i.value for i in items]
3074 return [i.value for i in items]
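
A hedged sketch of how ``ui.clonebundleprefers`` drives this ordering. The preference list and URLs are invented, and the comparison below is simplified: it skips the missing-attribute special cases that clonebundleentry handles. In hgrc this corresponds to something like ``clonebundleprefers = COMPRESSION=zstd, VERSION=v2`` under ``[ui]``.

import functools

prefers = [(b'COMPRESSION', b'zstd'), (b'VERSION', b'v2')]
entries = [
    {b'URL': b'https://a.example/gzip.hg', b'COMPRESSION': b'gzip', b'VERSION': b'v2'},
    {b'URL': b'https://b.example/zstd.hg', b'COMPRESSION': b'zstd', b'VERSION': b'v2'},
]

def _cmp(a, b):
    # the first preference attribute that distinguishes the two entries wins
    for key, value in prefers:
        av, bv = a.get(key), b.get(key)
        if av == bv:
            continue
        if av == value:
            return -1
        if bv == value:
            return 1
    return 0

best = sorted(entries, key=functools.cmp_to_key(_cmp))[0]
print(best[b'URL'])  # b'https://b.example/zstd.hg' -- matches the first preference
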
3073
3075
3074
3076
3075 def trypullbundlefromurl(ui, repo, url):
3077 def trypullbundlefromurl(ui, repo, url):
3076 """Attempt to apply a bundle from a URL."""
3078 """Attempt to apply a bundle from a URL."""
3077 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3079 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3078 try:
3080 try:
3079 fh = urlmod.open(ui, url)
3081 fh = urlmod.open(ui, url)
3080 cg = readbundle(ui, fh, b'stream')
3082 cg = readbundle(ui, fh, b'stream')
3081
3083
3082 if isinstance(cg, streamclone.streamcloneapplier):
3084 if isinstance(cg, streamclone.streamcloneapplier):
3083 cg.apply(repo)
3085 cg.apply(repo)
3084 else:
3086 else:
3085 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3087 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3086 return True
3088 return True
3087 except urlerr.httperror as e:
3089 except urlerr.httperror as e:
3088 ui.warn(
3090 ui.warn(
3089 _(b'HTTP error fetching bundle: %s\n')
3091 _(b'HTTP error fetching bundle: %s\n')
3090 % stringutil.forcebytestr(e)
3092 % stringutil.forcebytestr(e)
3091 )
3093 )
3092 except urlerr.urlerror as e:
3094 except urlerr.urlerror as e:
3093 ui.warn(
3095 ui.warn(
3094 _(b'error fetching bundle: %s\n')
3096 _(b'error fetching bundle: %s\n')
3095 % stringutil.forcebytestr(e.reason)
3097 % stringutil.forcebytestr(e.reason)
3096 )
3098 )
3097
3099
3098 return False
3100 return False
@@ -1,1460 +1,1459 b'' (mercurial/hg.py)
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
13 import os
12 import os
14 import shutil
13 import shutil
15 import stat
14 import stat
16
15
17 from .i18n import _
16 from .i18n import _
18 from .node import nullid
17 from .node import nullid
19 from .pycompat import getattr
18 from .pycompat import getattr
20
19
21 from . import (
20 from . import (
22 bookmarks,
21 bookmarks,
23 bundlerepo,
22 bundlerepo,
24 cacheutil,
23 cacheutil,
25 cmdutil,
24 cmdutil,
26 destutil,
25 destutil,
27 discovery,
26 discovery,
28 error,
27 error,
29 exchange,
28 exchange,
30 extensions,
29 extensions,
31 httppeer,
30 httppeer,
32 localrepo,
31 localrepo,
33 lock,
32 lock,
34 logcmdutil,
33 logcmdutil,
35 logexchange,
34 logexchange,
36 merge as mergemod,
35 merge as mergemod,
37 narrowspec,
36 narrowspec,
38 node,
37 node,
39 phases,
38 phases,
40 pycompat,
39 pycompat,
41 scmutil,
40 scmutil,
42 sshpeer,
41 sshpeer,
43 statichttprepo,
42 statichttprepo,
44 ui as uimod,
43 ui as uimod,
45 unionrepo,
44 unionrepo,
46 url,
45 url,
47 util,
46 util,
48 verify as verifymod,
47 verify as verifymod,
49 vfs as vfsmod,
48 vfs as vfsmod,
50 )
49 )
51
50 from .utils import hashutil
52 from .interfaces import repository as repositorymod
51 from .interfaces import repository as repositorymod
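
The hashutil import above replaces the hashlib one, and the call site later in this hunk (the pooled-storage sharepath computation) only swaps the constructor, so hashutil.sha1 is presumably a drop-in for hashlib.sha1. The interface being relied on, demonstrated here with hashlib since that is the known baseline (the hashed value is just an illustration):

import hashlib

h = hashlib.sha1(b'https://example.org/repo')  # hashutil.sha1 is expected to accept the same arguments
h.update(b'#stable')
print(h.hexdigest())   # 40-char hex digest; the raw 20-byte form comes from .digest()
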
53
52
54 release = lock.release
53 release = lock.release
55
54
56 # shared features
55 # shared features
57 sharedbookmarks = b'bookmarks'
56 sharedbookmarks = b'bookmarks'
58
57
59
58
60 def _local(path):
59 def _local(path):
61 path = util.expandpath(util.urllocalpath(path))
60 path = util.expandpath(util.urllocalpath(path))
62
61
63 try:
62 try:
64 isfile = os.path.isfile(path)
63 isfile = os.path.isfile(path)
65 # Python 2 raises TypeError, Python 3 ValueError.
64 # Python 2 raises TypeError, Python 3 ValueError.
66 except (TypeError, ValueError) as e:
65 except (TypeError, ValueError) as e:
67 raise error.Abort(
66 raise error.Abort(
68 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
67 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
69 )
68 )
70
69
71 return isfile and bundlerepo or localrepo
70 return isfile and bundlerepo or localrepo
72
71
73
72
74 def addbranchrevs(lrepo, other, branches, revs):
73 def addbranchrevs(lrepo, other, branches, revs):
75 peer = other.peer() # a courtesy to callers using a localrepo for other
74 peer = other.peer() # a courtesy to callers using a localrepo for other
76 hashbranch, branches = branches
75 hashbranch, branches = branches
77 if not hashbranch and not branches:
76 if not hashbranch and not branches:
78 x = revs or None
77 x = revs or None
79 if revs:
78 if revs:
80 y = revs[0]
79 y = revs[0]
81 else:
80 else:
82 y = None
81 y = None
83 return x, y
82 return x, y
84 if revs:
83 if revs:
85 revs = list(revs)
84 revs = list(revs)
86 else:
85 else:
87 revs = []
86 revs = []
88
87
89 if not peer.capable(b'branchmap'):
88 if not peer.capable(b'branchmap'):
90 if branches:
89 if branches:
91 raise error.Abort(_(b"remote branch lookup not supported"))
90 raise error.Abort(_(b"remote branch lookup not supported"))
92 revs.append(hashbranch)
91 revs.append(hashbranch)
93 return revs, revs[0]
92 return revs, revs[0]
94
93
95 with peer.commandexecutor() as e:
94 with peer.commandexecutor() as e:
96 branchmap = e.callcommand(b'branchmap', {}).result()
95 branchmap = e.callcommand(b'branchmap', {}).result()
97
96
98 def primary(branch):
97 def primary(branch):
99 if branch == b'.':
98 if branch == b'.':
100 if not lrepo:
99 if not lrepo:
101 raise error.Abort(_(b"dirstate branch not accessible"))
100 raise error.Abort(_(b"dirstate branch not accessible"))
102 branch = lrepo.dirstate.branch()
101 branch = lrepo.dirstate.branch()
103 if branch in branchmap:
102 if branch in branchmap:
104 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
103 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
105 return True
104 return True
106 else:
105 else:
107 return False
106 return False
108
107
109 for branch in branches:
108 for branch in branches:
110 if not primary(branch):
109 if not primary(branch):
111 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
110 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
112 if hashbranch:
111 if hashbranch:
113 if not primary(hashbranch):
112 if not primary(hashbranch):
114 revs.append(hashbranch)
113 revs.append(hashbranch)
115 return revs, revs[0]
114 return revs, revs[0]
116
115
117
116
118 def parseurl(path, branches=None):
117 def parseurl(path, branches=None):
119 '''parse url#branch, returning (url, (branch, branches))'''
118 '''parse url#branch, returning (url, (branch, branches))'''
120
119
121 u = util.url(path)
120 u = util.url(path)
122 branch = None
121 branch = None
123 if u.fragment:
122 if u.fragment:
124 branch = u.fragment
123 branch = u.fragment
125 u.fragment = None
124 u.fragment = None
126 return bytes(u), (branch, branches or [])
125 return bytes(u), (branch, branches or [])
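
For illustration (the URL is invented, and this assumes parseurl above, which in turn relies on mercurial's util.url): the fragment becomes the branch and the URL is returned without it.

url, (branch, branches) = parseurl(b'https://example.org/repo#stable')
# url == b'https://example.org/repo', branch == b'stable', branches == []
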
127
126
128
127
129 schemes = {
128 schemes = {
130 b'bundle': bundlerepo,
129 b'bundle': bundlerepo,
131 b'union': unionrepo,
130 b'union': unionrepo,
132 b'file': _local,
131 b'file': _local,
133 b'http': httppeer,
132 b'http': httppeer,
134 b'https': httppeer,
133 b'https': httppeer,
135 b'ssh': sshpeer,
134 b'ssh': sshpeer,
136 b'static-http': statichttprepo,
135 b'static-http': statichttprepo,
137 }
136 }
138
137
139
138
140 def _peerlookup(path):
139 def _peerlookup(path):
141 u = util.url(path)
140 u = util.url(path)
142 scheme = u.scheme or b'file'
141 scheme = u.scheme or b'file'
143 thing = schemes.get(scheme) or schemes[b'file']
142 thing = schemes.get(scheme) or schemes[b'file']
144 try:
143 try:
145 return thing(path)
144 return thing(path)
146 except TypeError:
145 except TypeError:
147 # we can't test callable(thing) because 'thing' can be an unloaded
146 # we can't test callable(thing) because 'thing' can be an unloaded
148 # module that implements __call__
147 # module that implements __call__
149 if not util.safehasattr(thing, b'instance'):
148 if not util.safehasattr(thing, b'instance'):
150 raise
149 raise
151 return thing
150 return thing
152
151
153
152
154 def islocal(repo):
153 def islocal(repo):
155 '''return true if repo (or path pointing to repo) is local'''
154 '''return true if repo (or path pointing to repo) is local'''
156 if isinstance(repo, bytes):
155 if isinstance(repo, bytes):
157 try:
156 try:
158 return _peerlookup(repo).islocal(repo)
157 return _peerlookup(repo).islocal(repo)
159 except AttributeError:
158 except AttributeError:
160 return False
159 return False
161 return repo.local()
160 return repo.local()
162
161
163
162
164 def openpath(ui, path, sendaccept=True):
163 def openpath(ui, path, sendaccept=True):
165 '''open path with open if local, url.open if remote'''
164 '''open path with open if local, url.open if remote'''
166 pathurl = util.url(path, parsequery=False, parsefragment=False)
165 pathurl = util.url(path, parsequery=False, parsefragment=False)
167 if pathurl.islocal():
166 if pathurl.islocal():
168 return util.posixfile(pathurl.localpath(), b'rb')
167 return util.posixfile(pathurl.localpath(), b'rb')
169 else:
168 else:
170 return url.open(ui, path, sendaccept=sendaccept)
169 return url.open(ui, path, sendaccept=sendaccept)
171
170
172
171
173 # a list of (ui, repo) functions called for wire peer initialization
172 # a list of (ui, repo) functions called for wire peer initialization
174 wirepeersetupfuncs = []
173 wirepeersetupfuncs = []
175
174
176
175
177 def _peerorrepo(
176 def _peerorrepo(
178 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
177 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
179 ):
178 ):
180 """return a repository object for the specified path"""
179 """return a repository object for the specified path"""
181 obj = _peerlookup(path).instance(
180 obj = _peerlookup(path).instance(
182 ui, path, create, intents=intents, createopts=createopts
181 ui, path, create, intents=intents, createopts=createopts
183 )
182 )
184 ui = getattr(obj, "ui", ui)
183 ui = getattr(obj, "ui", ui)
185 for f in presetupfuncs or []:
184 for f in presetupfuncs or []:
186 f(ui, obj)
185 f(ui, obj)
187 ui.log(b'extension', b'- executing reposetup hooks\n')
186 ui.log(b'extension', b'- executing reposetup hooks\n')
188 with util.timedcm('all reposetup') as allreposetupstats:
187 with util.timedcm('all reposetup') as allreposetupstats:
189 for name, module in extensions.extensions(ui):
188 for name, module in extensions.extensions(ui):
190 ui.log(b'extension', b' - running reposetup for %s\n', name)
189 ui.log(b'extension', b' - running reposetup for %s\n', name)
191 hook = getattr(module, 'reposetup', None)
190 hook = getattr(module, 'reposetup', None)
192 if hook:
191 if hook:
193 with util.timedcm('reposetup %r', name) as stats:
192 with util.timedcm('reposetup %r', name) as stats:
194 hook(ui, obj)
193 hook(ui, obj)
195 ui.log(
194 ui.log(
196 b'extension', b' > reposetup for %s took %s\n', name, stats
195 b'extension', b' > reposetup for %s took %s\n', name, stats
197 )
196 )
198 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
197 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
199 if not obj.local():
198 if not obj.local():
200 for f in wirepeersetupfuncs:
199 for f in wirepeersetupfuncs:
201 f(ui, obj)
200 f(ui, obj)
202 return obj
201 return obj
203
202
204
203
205 def repository(
204 def repository(
206 ui,
205 ui,
207 path=b'',
206 path=b'',
208 create=False,
207 create=False,
209 presetupfuncs=None,
208 presetupfuncs=None,
210 intents=None,
209 intents=None,
211 createopts=None,
210 createopts=None,
212 ):
211 ):
213 """return a repository object for the specified path"""
212 """return a repository object for the specified path"""
214 peer = _peerorrepo(
213 peer = _peerorrepo(
215 ui,
214 ui,
216 path,
215 path,
217 create,
216 create,
218 presetupfuncs=presetupfuncs,
217 presetupfuncs=presetupfuncs,
219 intents=intents,
218 intents=intents,
220 createopts=createopts,
219 createopts=createopts,
221 )
220 )
222 repo = peer.local()
221 repo = peer.local()
223 if not repo:
222 if not repo:
224 raise error.Abort(
223 raise error.Abort(
225 _(b"repository '%s' is not local") % (path or peer.url())
224 _(b"repository '%s' is not local") % (path or peer.url())
226 )
225 )
227 return repo.filtered(b'visible')
226 return repo.filtered(b'visible')
228
227
229
228
230 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
229 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
231 '''return a repository peer for the specified path'''
230 '''return a repository peer for the specified path'''
232 rui = remoteui(uiorrepo, opts)
231 rui = remoteui(uiorrepo, opts)
233 return _peerorrepo(
232 return _peerorrepo(
234 rui, path, create, intents=intents, createopts=createopts
233 rui, path, create, intents=intents, createopts=createopts
235 ).peer()
234 ).peer()
236
235
237
236
238 def defaultdest(source):
237 def defaultdest(source):
239 '''return default destination of clone if none is given
238 '''return default destination of clone if none is given
240
239
241 >>> defaultdest(b'foo')
240 >>> defaultdest(b'foo')
242 'foo'
241 'foo'
243 >>> defaultdest(b'/foo/bar')
242 >>> defaultdest(b'/foo/bar')
244 'bar'
243 'bar'
245 >>> defaultdest(b'/')
244 >>> defaultdest(b'/')
246 ''
245 ''
247 >>> defaultdest(b'')
246 >>> defaultdest(b'')
248 ''
247 ''
249 >>> defaultdest(b'http://example.org/')
248 >>> defaultdest(b'http://example.org/')
250 ''
249 ''
251 >>> defaultdest(b'http://example.org/foo/')
250 >>> defaultdest(b'http://example.org/foo/')
252 'foo'
251 'foo'
253 '''
252 '''
254 path = util.url(source).path
253 path = util.url(source).path
255 if not path:
254 if not path:
256 return b''
255 return b''
257 return os.path.basename(os.path.normpath(path))
256 return os.path.basename(os.path.normpath(path))
258
257
259
258
260 def sharedreposource(repo):
259 def sharedreposource(repo):
261 """Returns repository object for source repository of a shared repo.
260 """Returns repository object for source repository of a shared repo.
262
261
263 If repo is not a shared repository, returns None.
262 If repo is not a shared repository, returns None.
264 """
263 """
265 if repo.sharedpath == repo.path:
264 if repo.sharedpath == repo.path:
266 return None
265 return None
267
266
268 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
267 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
269 return repo.srcrepo
268 return repo.srcrepo
270
269
271 # the sharedpath always ends in the .hg; we want the path to the repo
270 # the sharedpath always ends in the .hg; we want the path to the repo
272 source = repo.vfs.split(repo.sharedpath)[0]
271 source = repo.vfs.split(repo.sharedpath)[0]
273 srcurl, branches = parseurl(source)
272 srcurl, branches = parseurl(source)
274 srcrepo = repository(repo.ui, srcurl)
273 srcrepo = repository(repo.ui, srcurl)
275 repo.srcrepo = srcrepo
274 repo.srcrepo = srcrepo
276 return srcrepo
275 return srcrepo
277
276
278
277
279 def share(
278 def share(
280 ui,
279 ui,
281 source,
280 source,
282 dest=None,
281 dest=None,
283 update=True,
282 update=True,
284 bookmarks=True,
283 bookmarks=True,
285 defaultpath=None,
284 defaultpath=None,
286 relative=False,
285 relative=False,
287 ):
286 ):
288 '''create a shared repository'''
287 '''create a shared repository'''
289
288
290 if not islocal(source):
289 if not islocal(source):
291 raise error.Abort(_(b'can only share local repositories'))
290 raise error.Abort(_(b'can only share local repositories'))
292
291
293 if not dest:
292 if not dest:
294 dest = defaultdest(source)
293 dest = defaultdest(source)
295 else:
294 else:
296 dest = ui.expandpath(dest)
295 dest = ui.expandpath(dest)
297
296
298 if isinstance(source, bytes):
297 if isinstance(source, bytes):
299 origsource = ui.expandpath(source)
298 origsource = ui.expandpath(source)
300 source, branches = parseurl(origsource)
299 source, branches = parseurl(origsource)
301 srcrepo = repository(ui, source)
300 srcrepo = repository(ui, source)
302 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
301 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
303 else:
302 else:
304 srcrepo = source.local()
303 srcrepo = source.local()
305 checkout = None
304 checkout = None
306
305
307 shareditems = set()
306 shareditems = set()
308 if bookmarks:
307 if bookmarks:
309 shareditems.add(sharedbookmarks)
308 shareditems.add(sharedbookmarks)
310
309
311 r = repository(
310 r = repository(
312 ui,
311 ui,
313 dest,
312 dest,
314 create=True,
313 create=True,
315 createopts={
314 createopts={
316 b'sharedrepo': srcrepo,
315 b'sharedrepo': srcrepo,
317 b'sharedrelative': relative,
316 b'sharedrelative': relative,
318 b'shareditems': shareditems,
317 b'shareditems': shareditems,
319 },
318 },
320 )
319 )
321
320
322 postshare(srcrepo, r, defaultpath=defaultpath)
321 postshare(srcrepo, r, defaultpath=defaultpath)
323 r = repository(ui, dest)
322 r = repository(ui, dest)
324 _postshareupdate(r, update, checkout=checkout)
323 _postshareupdate(r, update, checkout=checkout)
325 return r
324 return r
326
325
327
326
328 def unshare(ui, repo):
327 def unshare(ui, repo):
329 """convert a shared repository to a normal one
328 """convert a shared repository to a normal one
330
329
331 Copy the store data to the repo and remove the sharedpath data.
330 Copy the store data to the repo and remove the sharedpath data.
332
331
333 Returns a new repository object representing the unshared repository.
332 Returns a new repository object representing the unshared repository.
334
333
335 The passed repository object is not usable after this function is
334 The passed repository object is not usable after this function is
336 called.
335 called.
337 """
336 """
338
337
339 with repo.lock():
338 with repo.lock():
340 # we use locks here because if we race with commit, we
339 # we use locks here because if we race with commit, we
341 # can end up with extra data in the cloned revlogs that's
340 # can end up with extra data in the cloned revlogs that's
342 # not pointed to by changesets, thus causing verify to
341 # not pointed to by changesets, thus causing verify to
343 # fail
342 # fail
344 destlock = copystore(ui, repo, repo.path)
343 destlock = copystore(ui, repo, repo.path)
345 with destlock or util.nullcontextmanager():
344 with destlock or util.nullcontextmanager():
346
345
347 sharefile = repo.vfs.join(b'sharedpath')
346 sharefile = repo.vfs.join(b'sharedpath')
348 util.rename(sharefile, sharefile + b'.old')
347 util.rename(sharefile, sharefile + b'.old')
349
348
350 repo.requirements.discard(b'shared')
349 repo.requirements.discard(b'shared')
351 repo.requirements.discard(b'relshared')
350 repo.requirements.discard(b'relshared')
352 repo._writerequirements()
351 repo._writerequirements()
353
352
354 # Removing share changes some fundamental properties of the repo instance.
353 # Removing share changes some fundamental properties of the repo instance.
355 # So we instantiate a new repo object and operate on it rather than
354 # So we instantiate a new repo object and operate on it rather than
356 # try to keep the existing repo usable.
355 # try to keep the existing repo usable.
357 newrepo = repository(repo.baseui, repo.root, create=False)
356 newrepo = repository(repo.baseui, repo.root, create=False)
358
357
359 # TODO: figure out how to access subrepos that exist, but were previously
358 # TODO: figure out how to access subrepos that exist, but were previously
360 # removed from .hgsub
359 # removed from .hgsub
361 c = newrepo[b'.']
360 c = newrepo[b'.']
362 subs = c.substate
361 subs = c.substate
363 for s in sorted(subs):
362 for s in sorted(subs):
364 c.sub(s).unshare()
363 c.sub(s).unshare()
365
364
366 localrepo.poisonrepository(repo)
365 localrepo.poisonrepository(repo)
367
366
368 return newrepo
367 return newrepo
369
368
370
369
371 def postshare(sourcerepo, destrepo, defaultpath=None):
370 def postshare(sourcerepo, destrepo, defaultpath=None):
372 """Called after a new shared repo is created.
371 """Called after a new shared repo is created.
373
372
374 The new repo only has a requirements file and pointer to the source.
373 The new repo only has a requirements file and pointer to the source.
375 This function configures additional shared data.
374 This function configures additional shared data.
376
375
377 Extensions can wrap this function and write additional entries to
376 Extensions can wrap this function and write additional entries to
378 destrepo/.hg/shared to indicate additional pieces of data to be shared.
377 destrepo/.hg/shared to indicate additional pieces of data to be shared.
379 """
378 """
380 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
379 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
381 if default:
380 if default:
382 template = b'[paths]\ndefault = %s\n'
381 template = b'[paths]\ndefault = %s\n'
383 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
382 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
384 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
383 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
385 with destrepo.wlock():
384 with destrepo.wlock():
386 narrowspec.copytoworkingcopy(destrepo)
385 narrowspec.copytoworkingcopy(destrepo)
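
For reference, this is the hgrc content postshare() writes into the new shared repo when a default path is known (modulo native EOL conversion); the pool path below is invented.

template = b'[paths]\ndefault = %s\n'
print((template % b'/srv/hg-pool/0123abcd').decode())
# [paths]
# default = /srv/hg-pool/0123abcd
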
387
386
388
387
389 def _postshareupdate(repo, update, checkout=None):
388 def _postshareupdate(repo, update, checkout=None):
390 """Maybe perform a working directory update after a shared repo is created.
389 """Maybe perform a working directory update after a shared repo is created.
391
390
392 ``update`` can be a boolean or a revision to update to.
391 ``update`` can be a boolean or a revision to update to.
393 """
392 """
394 if not update:
393 if not update:
395 return
394 return
396
395
397 repo.ui.status(_(b"updating working directory\n"))
396 repo.ui.status(_(b"updating working directory\n"))
398 if update is not True:
397 if update is not True:
399 checkout = update
398 checkout = update
400 for test in (checkout, b'default', b'tip'):
399 for test in (checkout, b'default', b'tip'):
401 if test is None:
400 if test is None:
402 continue
401 continue
403 try:
402 try:
404 uprev = repo.lookup(test)
403 uprev = repo.lookup(test)
405 break
404 break
406 except error.RepoLookupError:
405 except error.RepoLookupError:
407 continue
406 continue
408 _update(repo, uprev)
407 _update(repo, uprev)
409
408
410
409
411 def copystore(ui, srcrepo, destpath):
410 def copystore(ui, srcrepo, destpath):
412 '''copy files from store of srcrepo in destpath
411 '''copy files from store of srcrepo in destpath
413
412
414 returns destlock
413 returns destlock
415 '''
414 '''
416 destlock = None
415 destlock = None
417 try:
416 try:
418 hardlink = None
417 hardlink = None
419 topic = _(b'linking') if hardlink else _(b'copying')
418 topic = _(b'linking') if hardlink else _(b'copying')
420 with ui.makeprogress(topic, unit=_(b'files')) as progress:
419 with ui.makeprogress(topic, unit=_(b'files')) as progress:
421 num = 0
420 num = 0
422 srcpublishing = srcrepo.publishing()
421 srcpublishing = srcrepo.publishing()
423 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
422 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
424 dstvfs = vfsmod.vfs(destpath)
423 dstvfs = vfsmod.vfs(destpath)
425 for f in srcrepo.store.copylist():
424 for f in srcrepo.store.copylist():
426 if srcpublishing and f.endswith(b'phaseroots'):
425 if srcpublishing and f.endswith(b'phaseroots'):
427 continue
426 continue
428 dstbase = os.path.dirname(f)
427 dstbase = os.path.dirname(f)
429 if dstbase and not dstvfs.exists(dstbase):
428 if dstbase and not dstvfs.exists(dstbase):
430 dstvfs.mkdir(dstbase)
429 dstvfs.mkdir(dstbase)
431 if srcvfs.exists(f):
430 if srcvfs.exists(f):
432 if f.endswith(b'data'):
431 if f.endswith(b'data'):
433 # 'dstbase' may be empty (e.g. revlog format 0)
432 # 'dstbase' may be empty (e.g. revlog format 0)
434 lockfile = os.path.join(dstbase, b"lock")
433 lockfile = os.path.join(dstbase, b"lock")
435 # lock to avoid premature writing to the target
434 # lock to avoid premature writing to the target
436 destlock = lock.lock(dstvfs, lockfile)
435 destlock = lock.lock(dstvfs, lockfile)
437 hardlink, n = util.copyfiles(
436 hardlink, n = util.copyfiles(
438 srcvfs.join(f), dstvfs.join(f), hardlink, progress
437 srcvfs.join(f), dstvfs.join(f), hardlink, progress
439 )
438 )
440 num += n
439 num += n
441 if hardlink:
440 if hardlink:
442 ui.debug(b"linked %d files\n" % num)
441 ui.debug(b"linked %d files\n" % num)
443 else:
442 else:
444 ui.debug(b"copied %d files\n" % num)
443 ui.debug(b"copied %d files\n" % num)
445 return destlock
444 return destlock
446 except: # re-raises
445 except: # re-raises
447 release(destlock)
446 release(destlock)
448 raise
447 raise
449
448
450
449
451 def clonewithshare(
450 def clonewithshare(
452 ui,
451 ui,
453 peeropts,
452 peeropts,
454 sharepath,
453 sharepath,
455 source,
454 source,
456 srcpeer,
455 srcpeer,
457 dest,
456 dest,
458 pull=False,
457 pull=False,
459 rev=None,
458 rev=None,
460 update=True,
459 update=True,
461 stream=False,
460 stream=False,
462 ):
461 ):
463 """Perform a clone using a shared repo.
462 """Perform a clone using a shared repo.
464
463
465 The store for the repository will be located at <sharepath>/.hg. The
464 The store for the repository will be located at <sharepath>/.hg. The
466 specified revisions will be cloned or pulled from "source". A shared repo
465 specified revisions will be cloned or pulled from "source". A shared repo
467 will be created at "dest" and a working copy will be created if "update" is
466 will be created at "dest" and a working copy will be created if "update" is
468 True.
467 True.
469 """
468 """
470 revs = None
469 revs = None
471 if rev:
470 if rev:
472 if not srcpeer.capable(b'lookup'):
471 if not srcpeer.capable(b'lookup'):
473 raise error.Abort(
472 raise error.Abort(
474 _(
473 _(
475 b"src repository does not support "
474 b"src repository does not support "
476 b"revision lookup and so doesn't "
475 b"revision lookup and so doesn't "
477 b"support clone by revision"
476 b"support clone by revision"
478 )
477 )
479 )
478 )
480
479
481 # TODO this is batchable.
480 # TODO this is batchable.
482 remoterevs = []
481 remoterevs = []
483 for r in rev:
482 for r in rev:
484 with srcpeer.commandexecutor() as e:
483 with srcpeer.commandexecutor() as e:
485 remoterevs.append(
484 remoterevs.append(
486 e.callcommand(b'lookup', {b'key': r,}).result()
485 e.callcommand(b'lookup', {b'key': r,}).result()
487 )
486 )
488 revs = remoterevs
487 revs = remoterevs
489
488
490 # Obtain a lock before checking for or cloning the pooled repo otherwise
489 # Obtain a lock before checking for or cloning the pooled repo otherwise
491 # 2 clients may race creating or populating it.
490 # 2 clients may race creating or populating it.
492 pooldir = os.path.dirname(sharepath)
491 pooldir = os.path.dirname(sharepath)
493 # lock class requires the directory to exist.
492 # lock class requires the directory to exist.
494 try:
493 try:
495 util.makedir(pooldir, False)
494 util.makedir(pooldir, False)
496 except OSError as e:
495 except OSError as e:
497 if e.errno != errno.EEXIST:
496 if e.errno != errno.EEXIST:
498 raise
497 raise
499
498
500 poolvfs = vfsmod.vfs(pooldir)
499 poolvfs = vfsmod.vfs(pooldir)
501 basename = os.path.basename(sharepath)
500 basename = os.path.basename(sharepath)
502
501
503 with lock.lock(poolvfs, b'%s.lock' % basename):
502 with lock.lock(poolvfs, b'%s.lock' % basename):
504 if os.path.exists(sharepath):
503 if os.path.exists(sharepath):
505 ui.status(
504 ui.status(
506 _(b'(sharing from existing pooled repository %s)\n') % basename
505 _(b'(sharing from existing pooled repository %s)\n') % basename
507 )
506 )
508 else:
507 else:
509 ui.status(
508 ui.status(
510 _(b'(sharing from new pooled repository %s)\n') % basename
509 _(b'(sharing from new pooled repository %s)\n') % basename
511 )
510 )
512 # Always use pull mode because hardlinks in share mode don't work
511 # Always use pull mode because hardlinks in share mode don't work
513 # well. Never update because working copies aren't necessary in
512 # well. Never update because working copies aren't necessary in
514 # share mode.
513 # share mode.
515 clone(
514 clone(
516 ui,
515 ui,
517 peeropts,
516 peeropts,
518 source,
517 source,
519 dest=sharepath,
518 dest=sharepath,
520 pull=True,
519 pull=True,
521 revs=rev,
520 revs=rev,
522 update=False,
521 update=False,
523 stream=stream,
522 stream=stream,
524 )
523 )
525
524
526 # Resolve the value to put in [paths] section for the source.
525 # Resolve the value to put in [paths] section for the source.
527 if islocal(source):
526 if islocal(source):
528 defaultpath = os.path.abspath(util.urllocalpath(source))
527 defaultpath = os.path.abspath(util.urllocalpath(source))
529 else:
528 else:
530 defaultpath = source
529 defaultpath = source
531
530
532 sharerepo = repository(ui, path=sharepath)
531 sharerepo = repository(ui, path=sharepath)
533 destrepo = share(
532 destrepo = share(
534 ui,
533 ui,
535 sharerepo,
534 sharerepo,
536 dest=dest,
535 dest=dest,
537 update=False,
536 update=False,
538 bookmarks=False,
537 bookmarks=False,
539 defaultpath=defaultpath,
538 defaultpath=defaultpath,
540 )
539 )
541
540
542 # We need to perform a pull against the dest repo to fetch bookmarks
541 # We need to perform a pull against the dest repo to fetch bookmarks
543 # and other non-store data that isn't shared by default. In the case of
542 # and other non-store data that isn't shared by default. In the case of
544 # non-existing shared repo, this means we pull from the remote twice. This
543 # non-existing shared repo, this means we pull from the remote twice. This
545 # is a bit weird. But at the time it was implemented, there wasn't an easy
544 # is a bit weird. But at the time it was implemented, there wasn't an easy
546 # way to pull just non-changegroup data.
545 # way to pull just non-changegroup data.
547 exchange.pull(destrepo, srcpeer, heads=revs)
546 exchange.pull(destrepo, srcpeer, heads=revs)
548
547
549 _postshareupdate(destrepo, update)
548 _postshareupdate(destrepo, update)
550
549
551 return srcpeer, peer(ui, peeropts, dest)
550 return srcpeer, peer(ui, peeropts, dest)
552
551
553
552
554 # Recomputing branch cache might be slow on big repos,
553 # Recomputing branch cache might be slow on big repos,
555 # so just copy it
554 # so just copy it
556 def _copycache(srcrepo, dstcachedir, fname):
555 def _copycache(srcrepo, dstcachedir, fname):
557 """copy a cache from srcrepo to destcachedir (if it exists)"""
556 """copy a cache from srcrepo to destcachedir (if it exists)"""
558 srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname)
557 srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname)
559 dstbranchcache = os.path.join(dstcachedir, fname)
558 dstbranchcache = os.path.join(dstcachedir, fname)
560 if os.path.exists(srcbranchcache):
559 if os.path.exists(srcbranchcache):
561 if not os.path.exists(dstcachedir):
560 if not os.path.exists(dstcachedir):
562 os.mkdir(dstcachedir)
561 os.mkdir(dstcachedir)
563 util.copyfile(srcbranchcache, dstbranchcache)
562 util.copyfile(srcbranchcache, dstbranchcache)
564
563
565
564
566 def clone(
565 def clone(
567 ui,
566 ui,
568 peeropts,
567 peeropts,
569 source,
568 source,
570 dest=None,
569 dest=None,
571 pull=False,
570 pull=False,
572 revs=None,
571 revs=None,
573 update=True,
572 update=True,
574 stream=False,
573 stream=False,
575 branch=None,
574 branch=None,
576 shareopts=None,
575 shareopts=None,
577 storeincludepats=None,
576 storeincludepats=None,
578 storeexcludepats=None,
577 storeexcludepats=None,
579 depth=None,
578 depth=None,
580 ):
579 ):
581 """Make a copy of an existing repository.
580 """Make a copy of an existing repository.
582
581
583 Create a copy of an existing repository in a new directory. The
582 Create a copy of an existing repository in a new directory. The
584 source and destination are URLs, as passed to the repository
583 source and destination are URLs, as passed to the repository
585 function. Returns a pair of repository peers, the source and
584 function. Returns a pair of repository peers, the source and
586 newly created destination.
585 newly created destination.
587
586
588 The location of the source is added to the new repository's
587 The location of the source is added to the new repository's
589 .hg/hgrc file, as the default to be used for future pulls and
588 .hg/hgrc file, as the default to be used for future pulls and
590 pushes.
589 pushes.
591
590
592 If an exception is raised, the partly cloned/updated destination
591 If an exception is raised, the partly cloned/updated destination
593 repository will be deleted.
592 repository will be deleted.
594
593
595 Arguments:
594 Arguments:
596
595
597 source: repository object or URL
596 source: repository object or URL
598
597
599 dest: URL of destination repository to create (defaults to base
598 dest: URL of destination repository to create (defaults to base
600 name of source repository)
599 name of source repository)
601
600
602 pull: always pull from source repository, even in local case or if the
601 pull: always pull from source repository, even in local case or if the
603 server prefers streaming
602 server prefers streaming
604
603
605 stream: stream raw data uncompressed from repository (fast over
604 stream: stream raw data uncompressed from repository (fast over
606 LAN, slow over WAN)
605 LAN, slow over WAN)
607
606
608 revs: revision to clone up to (implies pull=True)
607 revs: revision to clone up to (implies pull=True)
609
608
610 update: update working directory after clone completes, if
609 update: update working directory after clone completes, if
611 destination is local repository (True means update to default rev,
610 destination is local repository (True means update to default rev,
612 anything else is treated as a revision)
611 anything else is treated as a revision)
613
612
614 branch: branches to clone
613 branch: branches to clone
615
614
616 shareopts: dict of options to control auto sharing behavior. The "pool" key
615 shareopts: dict of options to control auto sharing behavior. The "pool" key
617 activates auto sharing mode and defines the directory for stores. The
616 activates auto sharing mode and defines the directory for stores. The
618 "mode" key determines how to construct the directory name of the shared
617 "mode" key determines how to construct the directory name of the shared
619 repository. "identity" means the name is derived from the node of the first
618 repository. "identity" means the name is derived from the node of the first
620 changeset in the repository. "remote" means the name is derived from the
619 changeset in the repository. "remote" means the name is derived from the
621 remote's path/URL. Defaults to "identity."
620 remote's path/URL. Defaults to "identity."
622
621
623 storeincludepats and storeexcludepats: sets of file patterns to include and
622 storeincludepats and storeexcludepats: sets of file patterns to include and
624 exclude in the repository copy, respectively. If not defined, all files
623 exclude in the repository copy, respectively. If not defined, all files
625 will be included (a "full" clone). Otherwise a "narrow" clone containing
624 will be included (a "full" clone). Otherwise a "narrow" clone containing
626 only the requested files will be performed. If ``storeincludepats`` is not
625 only the requested files will be performed. If ``storeincludepats`` is not
627 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
626 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
628 ``path:.``. If both are empty sets, no files will be cloned.
627 ``path:.``. If both are empty sets, no files will be cloned.
629 """
628 """
630
629
631 if isinstance(source, bytes):
630 if isinstance(source, bytes):
632 origsource = ui.expandpath(source)
631 origsource = ui.expandpath(source)
633 source, branches = parseurl(origsource, branch)
632 source, branches = parseurl(origsource, branch)
634 srcpeer = peer(ui, peeropts, source)
633 srcpeer = peer(ui, peeropts, source)
635 else:
634 else:
636 srcpeer = source.peer() # in case we were called with a localrepo
635 srcpeer = source.peer() # in case we were called with a localrepo
637 branches = (None, branch or [])
636 branches = (None, branch or [])
638 origsource = source = srcpeer.url()
637 origsource = source = srcpeer.url()
639 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
638 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
640
639
641 if dest is None:
640 if dest is None:
642 dest = defaultdest(source)
641 dest = defaultdest(source)
643 if dest:
642 if dest:
644 ui.status(_(b"destination directory: %s\n") % dest)
643 ui.status(_(b"destination directory: %s\n") % dest)
645 else:
644 else:
646 dest = ui.expandpath(dest)
645 dest = ui.expandpath(dest)
647
646
648 dest = util.urllocalpath(dest)
647 dest = util.urllocalpath(dest)
649 source = util.urllocalpath(source)
648 source = util.urllocalpath(source)
650
649
651 if not dest:
650 if not dest:
652 raise error.Abort(_(b"empty destination path is not valid"))
651 raise error.Abort(_(b"empty destination path is not valid"))
653
652
654 destvfs = vfsmod.vfs(dest, expandpath=True)
653 destvfs = vfsmod.vfs(dest, expandpath=True)
655 if destvfs.lexists():
654 if destvfs.lexists():
656 if not destvfs.isdir():
655 if not destvfs.isdir():
657 raise error.Abort(_(b"destination '%s' already exists") % dest)
656 raise error.Abort(_(b"destination '%s' already exists") % dest)
658 elif destvfs.listdir():
657 elif destvfs.listdir():
659 raise error.Abort(_(b"destination '%s' is not empty") % dest)
658 raise error.Abort(_(b"destination '%s' is not empty") % dest)
660
659
661 createopts = {}
660 createopts = {}
662 narrow = False
661 narrow = False
663
662
664 if storeincludepats is not None:
663 if storeincludepats is not None:
665 narrowspec.validatepatterns(storeincludepats)
664 narrowspec.validatepatterns(storeincludepats)
666 narrow = True
665 narrow = True
667
666
668 if storeexcludepats is not None:
667 if storeexcludepats is not None:
669 narrowspec.validatepatterns(storeexcludepats)
668 narrowspec.validatepatterns(storeexcludepats)
670 narrow = True
669 narrow = True
671
670
672 if narrow:
671 if narrow:
673 # Include everything by default if only exclusion patterns defined.
672 # Include everything by default if only exclusion patterns defined.
674 if storeexcludepats and not storeincludepats:
673 if storeexcludepats and not storeincludepats:
675 storeincludepats = {b'path:.'}
674 storeincludepats = {b'path:.'}
676
675
677 createopts[b'narrowfiles'] = True
676 createopts[b'narrowfiles'] = True
678
677
679 if depth:
678 if depth:
680 createopts[b'shallowfilestore'] = True
679 createopts[b'shallowfilestore'] = True
681
680
682 if srcpeer.capable(b'lfs-serve'):
681 if srcpeer.capable(b'lfs-serve'):
683 # Repository creation honors the config if it disabled the extension, so
682 # Repository creation honors the config if it disabled the extension, so
684 # we can't just announce that lfs will be enabled. This check avoids
683 # we can't just announce that lfs will be enabled. This check avoids
685 # saying that lfs will be enabled, and then saying it's an unknown
684 # saying that lfs will be enabled, and then saying it's an unknown
686 # feature. The lfs creation option is set in either case so that a
685 # feature. The lfs creation option is set in either case so that a
687 # requirement is added. If the extension is explicitly disabled but the
686 # requirement is added. If the extension is explicitly disabled but the
688 # requirement is set, the clone aborts early, before transferring any
687 # requirement is set, the clone aborts early, before transferring any
689 # data.
688 # data.
690 createopts[b'lfs'] = True
689 createopts[b'lfs'] = True
691
690
692 if extensions.disabledext(b'lfs'):
691 if extensions.disabledext(b'lfs'):
693 ui.status(
692 ui.status(
694 _(
693 _(
695 b'(remote is using large file support (lfs), but it is '
694 b'(remote is using large file support (lfs), but it is '
696 b'explicitly disabled in the local configuration)\n'
695 b'explicitly disabled in the local configuration)\n'
697 )
696 )
698 )
697 )
699 else:
698 else:
700 ui.status(
699 ui.status(
701 _(
700 _(
702 b'(remote is using large file support (lfs); lfs will '
701 b'(remote is using large file support (lfs); lfs will '
703 b'be enabled for this repository)\n'
702 b'be enabled for this repository)\n'
704 )
703 )
705 )
704 )
706
705
707 shareopts = shareopts or {}
706 shareopts = shareopts or {}
708 sharepool = shareopts.get(b'pool')
707 sharepool = shareopts.get(b'pool')
709 sharenamemode = shareopts.get(b'mode')
708 sharenamemode = shareopts.get(b'mode')
710 if sharepool and islocal(dest):
709 if sharepool and islocal(dest):
711 sharepath = None
710 sharepath = None
712 if sharenamemode == b'identity':
711 if sharenamemode == b'identity':
713 # Resolve the name from the initial changeset in the remote
712 # Resolve the name from the initial changeset in the remote
714 # repository. This returns nullid when the remote is empty. It
713 # repository. This returns nullid when the remote is empty. It
715 # raises RepoLookupError if revision 0 is filtered or otherwise
714 # raises RepoLookupError if revision 0 is filtered or otherwise
716 # not available. If we fail to resolve, sharing is not enabled.
715 # not available. If we fail to resolve, sharing is not enabled.
717 try:
716 try:
718 with srcpeer.commandexecutor() as e:
717 with srcpeer.commandexecutor() as e:
719 rootnode = e.callcommand(
718 rootnode = e.callcommand(
720 b'lookup', {b'key': b'0',}
719 b'lookup', {b'key': b'0',}
721 ).result()
720 ).result()
722
721
723 if rootnode != node.nullid:
722 if rootnode != node.nullid:
724 sharepath = os.path.join(sharepool, node.hex(rootnode))
723 sharepath = os.path.join(sharepool, node.hex(rootnode))
725 else:
724 else:
726 ui.status(
725 ui.status(
727 _(
726 _(
728 b'(not using pooled storage: '
727 b'(not using pooled storage: '
729 b'remote appears to be empty)\n'
728 b'remote appears to be empty)\n'
730 )
729 )
731 )
730 )
732 except error.RepoLookupError:
731 except error.RepoLookupError:
733 ui.status(
732 ui.status(
734 _(
733 _(
735 b'(not using pooled storage: '
734 b'(not using pooled storage: '
736 b'unable to resolve identity of remote)\n'
735 b'unable to resolve identity of remote)\n'
737 )
736 )
738 )
737 )
739 elif sharenamemode == b'remote':
738 elif sharenamemode == b'remote':
740 sharepath = os.path.join(
739 sharepath = os.path.join(
741 sharepool, node.hex(hashlib.sha1(source).digest())
740 sharepool, node.hex(hashutil.sha1(source).digest())
742 )
741 )
743 else:
742 else:
744 raise error.Abort(
743 raise error.Abort(
745 _(b'unknown share naming mode: %s') % sharenamemode
744 _(b'unknown share naming mode: %s') % sharenamemode
746 )
745 )
747
746
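# Illustrative sketch of the b'remote' share-naming mode above (assumed
# example values, not part of this changeset): the pooled directory name is
# the hex SHA-1 of the source URL, e.g.
#
#   from mercurial import node
#   from mercurial.utils import hashutil
#   name = node.hex(hashutil.sha1(b'https://example.org/repo').digest())
#   # -> a 40-character hex string; the store lives at <share.pool>/<name>
#
# so repeated clones of the same source URL reuse one backing store.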
748 # TODO this is a somewhat arbitrary restriction.
747 # TODO this is a somewhat arbitrary restriction.
749 if narrow:
748 if narrow:
750 ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
749 ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
751 sharepath = None
750 sharepath = None
752
751
753 if sharepath:
752 if sharepath:
754 return clonewithshare(
753 return clonewithshare(
755 ui,
754 ui,
756 peeropts,
755 peeropts,
757 sharepath,
756 sharepath,
758 source,
757 source,
759 srcpeer,
758 srcpeer,
760 dest,
759 dest,
761 pull=pull,
760 pull=pull,
762 rev=revs,
761 rev=revs,
763 update=update,
762 update=update,
764 stream=stream,
763 stream=stream,
765 )
764 )
766
765
767 srclock = destlock = cleandir = None
766 srclock = destlock = cleandir = None
768 srcrepo = srcpeer.local()
767 srcrepo = srcpeer.local()
769 try:
768 try:
770 abspath = origsource
769 abspath = origsource
771 if islocal(origsource):
770 if islocal(origsource):
772 abspath = os.path.abspath(util.urllocalpath(origsource))
771 abspath = os.path.abspath(util.urllocalpath(origsource))
773
772
774 if islocal(dest):
773 if islocal(dest):
775 cleandir = dest
774 cleandir = dest
776
775
777 copy = False
776 copy = False
778 if (
777 if (
779 srcrepo
778 srcrepo
780 and srcrepo.cancopy()
779 and srcrepo.cancopy()
781 and islocal(dest)
780 and islocal(dest)
782 and not phases.hassecret(srcrepo)
781 and not phases.hassecret(srcrepo)
783 ):
782 ):
784 copy = not pull and not revs
783 copy = not pull and not revs
785
784
786 # TODO this is a somewhat arbitrary restriction.
785 # TODO this is a somewhat arbitrary restriction.
787 if narrow:
786 if narrow:
788 copy = False
787 copy = False
789
788
790 if copy:
789 if copy:
791 try:
790 try:
792 # we use a lock here because if we race with commit, we
791 # we use a lock here because if we race with commit, we
793 # can end up with extra data in the cloned revlogs that's
792 # can end up with extra data in the cloned revlogs that's
794 # not pointed to by changesets, thus causing verify to
793 # not pointed to by changesets, thus causing verify to
795 # fail
794 # fail
796 srclock = srcrepo.lock(wait=False)
795 srclock = srcrepo.lock(wait=False)
797 except error.LockError:
796 except error.LockError:
798 copy = False
797 copy = False
799
798
800 if copy:
799 if copy:
801 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
800 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
802 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
801 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
803 if not os.path.exists(dest):
802 if not os.path.exists(dest):
804 util.makedirs(dest)
803 util.makedirs(dest)
805 else:
804 else:
806 # only clean up directories we create ourselves
805 # only clean up directories we create ourselves
807 cleandir = hgdir
806 cleandir = hgdir
808 try:
807 try:
809 destpath = hgdir
808 destpath = hgdir
810 util.makedir(destpath, notindexed=True)
809 util.makedir(destpath, notindexed=True)
811 except OSError as inst:
810 except OSError as inst:
812 if inst.errno == errno.EEXIST:
811 if inst.errno == errno.EEXIST:
813 cleandir = None
812 cleandir = None
814 raise error.Abort(
813 raise error.Abort(
815 _(b"destination '%s' already exists") % dest
814 _(b"destination '%s' already exists") % dest
816 )
815 )
817 raise
816 raise
818
817
819 destlock = copystore(ui, srcrepo, destpath)
818 destlock = copystore(ui, srcrepo, destpath)
820 # copy bookmarks over
819 # copy bookmarks over
821 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
820 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
822 dstbookmarks = os.path.join(destpath, b'bookmarks')
821 dstbookmarks = os.path.join(destpath, b'bookmarks')
823 if os.path.exists(srcbookmarks):
822 if os.path.exists(srcbookmarks):
824 util.copyfile(srcbookmarks, dstbookmarks)
823 util.copyfile(srcbookmarks, dstbookmarks)
825
824
826 dstcachedir = os.path.join(destpath, b'cache')
825 dstcachedir = os.path.join(destpath, b'cache')
827 for cache in cacheutil.cachetocopy(srcrepo):
826 for cache in cacheutil.cachetocopy(srcrepo):
828 _copycache(srcrepo, dstcachedir, cache)
827 _copycache(srcrepo, dstcachedir, cache)
829
828
830 # we need to re-init the repo after manually copying the data
829 # we need to re-init the repo after manually copying the data
831 # into it
830 # into it
832 destpeer = peer(srcrepo, peeropts, dest)
831 destpeer = peer(srcrepo, peeropts, dest)
833 srcrepo.hook(
832 srcrepo.hook(
834 b'outgoing', source=b'clone', node=node.hex(node.nullid)
833 b'outgoing', source=b'clone', node=node.hex(node.nullid)
835 )
834 )
836 else:
835 else:
837 try:
836 try:
838 # only pass ui when no srcrepo
837 # only pass ui when no srcrepo
839 destpeer = peer(
838 destpeer = peer(
840 srcrepo or ui,
839 srcrepo or ui,
841 peeropts,
840 peeropts,
842 dest,
841 dest,
843 create=True,
842 create=True,
844 createopts=createopts,
843 createopts=createopts,
845 )
844 )
846 except OSError as inst:
845 except OSError as inst:
847 if inst.errno == errno.EEXIST:
846 if inst.errno == errno.EEXIST:
848 cleandir = None
847 cleandir = None
849 raise error.Abort(
848 raise error.Abort(
850 _(b"destination '%s' already exists") % dest
849 _(b"destination '%s' already exists") % dest
851 )
850 )
852 raise
851 raise
853
852
854 if revs:
853 if revs:
855 if not srcpeer.capable(b'lookup'):
854 if not srcpeer.capable(b'lookup'):
856 raise error.Abort(
855 raise error.Abort(
857 _(
856 _(
858 b"src repository does not support "
857 b"src repository does not support "
859 b"revision lookup and so doesn't "
858 b"revision lookup and so doesn't "
860 b"support clone by revision"
859 b"support clone by revision"
861 )
860 )
862 )
861 )
863
862
864 # TODO this is batchable.
863 # TODO this is batchable.
865 remoterevs = []
864 remoterevs = []
866 for rev in revs:
865 for rev in revs:
867 with srcpeer.commandexecutor() as e:
866 with srcpeer.commandexecutor() as e:
868 remoterevs.append(
867 remoterevs.append(
869 e.callcommand(b'lookup', {b'key': rev,}).result()
868 e.callcommand(b'lookup', {b'key': rev,}).result()
870 )
869 )
871 revs = remoterevs
870 revs = remoterevs
872
871
873 checkout = revs[0]
872 checkout = revs[0]
874 else:
873 else:
875 revs = None
874 revs = None
876 local = destpeer.local()
875 local = destpeer.local()
877 if local:
876 if local:
878 if narrow:
877 if narrow:
879 with local.wlock(), local.lock():
878 with local.wlock(), local.lock():
880 local.setnarrowpats(storeincludepats, storeexcludepats)
879 local.setnarrowpats(storeincludepats, storeexcludepats)
881 narrowspec.copytoworkingcopy(local)
880 narrowspec.copytoworkingcopy(local)
882
881
883 u = util.url(abspath)
882 u = util.url(abspath)
884 defaulturl = bytes(u)
883 defaulturl = bytes(u)
885 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
884 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
886 if not stream:
885 if not stream:
887 if pull:
886 if pull:
888 stream = False
887 stream = False
889 else:
888 else:
890 stream = None
889 stream = None
891 # internal config: ui.quietbookmarkmove
890 # internal config: ui.quietbookmarkmove
892 overrides = {(b'ui', b'quietbookmarkmove'): True}
891 overrides = {(b'ui', b'quietbookmarkmove'): True}
893 with local.ui.configoverride(overrides, b'clone'):
892 with local.ui.configoverride(overrides, b'clone'):
894 exchange.pull(
893 exchange.pull(
895 local,
894 local,
896 srcpeer,
895 srcpeer,
897 revs,
896 revs,
898 streamclonerequested=stream,
897 streamclonerequested=stream,
899 includepats=storeincludepats,
898 includepats=storeincludepats,
900 excludepats=storeexcludepats,
899 excludepats=storeexcludepats,
901 depth=depth,
900 depth=depth,
902 )
901 )
903 elif srcrepo:
902 elif srcrepo:
904 # TODO lift restriction once exchange.push() accepts narrow
903 # TODO lift restriction once exchange.push() accepts narrow
905 # push.
904 # push.
906 if narrow:
905 if narrow:
907 raise error.Abort(
906 raise error.Abort(
908 _(
907 _(
909 b'narrow clone not available for '
908 b'narrow clone not available for '
910 b'remote destinations'
909 b'remote destinations'
911 )
910 )
912 )
911 )
913
912
914 exchange.push(
913 exchange.push(
915 srcrepo,
914 srcrepo,
916 destpeer,
915 destpeer,
917 revs=revs,
916 revs=revs,
918 bookmarks=srcrepo._bookmarks.keys(),
917 bookmarks=srcrepo._bookmarks.keys(),
919 )
918 )
920 else:
919 else:
921 raise error.Abort(
920 raise error.Abort(
922 _(b"clone from remote to remote not supported")
921 _(b"clone from remote to remote not supported")
923 )
922 )
924
923
925 cleandir = None
924 cleandir = None
926
925
927 destrepo = destpeer.local()
926 destrepo = destpeer.local()
928 if destrepo:
927 if destrepo:
929 template = uimod.samplehgrcs[b'cloned']
928 template = uimod.samplehgrcs[b'cloned']
930 u = util.url(abspath)
929 u = util.url(abspath)
931 u.passwd = None
930 u.passwd = None
932 defaulturl = bytes(u)
931 defaulturl = bytes(u)
933 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
932 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
934 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
933 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
935
934
936 if ui.configbool(b'experimental', b'remotenames'):
935 if ui.configbool(b'experimental', b'remotenames'):
937 logexchange.pullremotenames(destrepo, srcpeer)
936 logexchange.pullremotenames(destrepo, srcpeer)
938
937
939 if update:
938 if update:
940 if update is not True:
939 if update is not True:
941 with srcpeer.commandexecutor() as e:
940 with srcpeer.commandexecutor() as e:
942 checkout = e.callcommand(
941 checkout = e.callcommand(
943 b'lookup', {b'key': update,}
942 b'lookup', {b'key': update,}
944 ).result()
943 ).result()
945
944
946 uprev = None
945 uprev = None
947 status = None
946 status = None
948 if checkout is not None:
947 if checkout is not None:
949 # Some extensions (at least hg-git and hg-subversion) have
948 # Some extensions (at least hg-git and hg-subversion) have
950 # a peer.lookup() implementation that returns a name instead
949 # a peer.lookup() implementation that returns a name instead
951 # of a nodeid. We work around it here until we've figured
950 # of a nodeid. We work around it here until we've figured
952 # out a better solution.
951 # out a better solution.
953 if len(checkout) == 20 and checkout in destrepo:
952 if len(checkout) == 20 and checkout in destrepo:
954 uprev = checkout
953 uprev = checkout
955 elif scmutil.isrevsymbol(destrepo, checkout):
954 elif scmutil.isrevsymbol(destrepo, checkout):
956 uprev = scmutil.revsymbol(destrepo, checkout).node()
955 uprev = scmutil.revsymbol(destrepo, checkout).node()
957 else:
956 else:
958 if update is not True:
957 if update is not True:
959 try:
958 try:
960 uprev = destrepo.lookup(update)
959 uprev = destrepo.lookup(update)
961 except error.RepoLookupError:
960 except error.RepoLookupError:
962 pass
961 pass
963 if uprev is None:
962 if uprev is None:
964 try:
963 try:
965 uprev = destrepo._bookmarks[b'@']
964 uprev = destrepo._bookmarks[b'@']
966 update = b'@'
965 update = b'@'
967 bn = destrepo[uprev].branch()
966 bn = destrepo[uprev].branch()
968 if bn == b'default':
967 if bn == b'default':
969 status = _(b"updating to bookmark @\n")
968 status = _(b"updating to bookmark @\n")
970 else:
969 else:
971 status = (
970 status = (
972 _(b"updating to bookmark @ on branch %s\n") % bn
971 _(b"updating to bookmark @ on branch %s\n") % bn
973 )
972 )
974 except KeyError:
973 except KeyError:
975 try:
974 try:
976 uprev = destrepo.branchtip(b'default')
975 uprev = destrepo.branchtip(b'default')
977 except error.RepoLookupError:
976 except error.RepoLookupError:
978 uprev = destrepo.lookup(b'tip')
977 uprev = destrepo.lookup(b'tip')
979 if not status:
978 if not status:
980 bn = destrepo[uprev].branch()
979 bn = destrepo[uprev].branch()
981 status = _(b"updating to branch %s\n") % bn
980 status = _(b"updating to branch %s\n") % bn
982 destrepo.ui.status(status)
981 destrepo.ui.status(status)
983 _update(destrepo, uprev)
982 _update(destrepo, uprev)
984 if update in destrepo._bookmarks:
983 if update in destrepo._bookmarks:
985 bookmarks.activate(destrepo, update)
984 bookmarks.activate(destrepo, update)
986 finally:
985 finally:
987 release(srclock, destlock)
986 release(srclock, destlock)
988 if cleandir is not None:
987 if cleandir is not None:
989 shutil.rmtree(cleandir, True)
988 shutil.rmtree(cleandir, True)
990 if srcpeer is not None:
989 if srcpeer is not None:
991 srcpeer.close()
990 srcpeer.close()
992 return srcpeer, destpeer
991 return srcpeer, destpeer
993
992
994
993
995 def _showstats(repo, stats, quietempty=False):
994 def _showstats(repo, stats, quietempty=False):
996 if quietempty and stats.isempty():
995 if quietempty and stats.isempty():
997 return
996 return
998 repo.ui.status(
997 repo.ui.status(
999 _(
998 _(
1000 b"%d files updated, %d files merged, "
999 b"%d files updated, %d files merged, "
1001 b"%d files removed, %d files unresolved\n"
1000 b"%d files removed, %d files unresolved\n"
1002 )
1001 )
1003 % (
1002 % (
1004 stats.updatedcount,
1003 stats.updatedcount,
1005 stats.mergedcount,
1004 stats.mergedcount,
1006 stats.removedcount,
1005 stats.removedcount,
1007 stats.unresolvedcount,
1006 stats.unresolvedcount,
1008 )
1007 )
1009 )
1008 )
1010
1009
1011
1010
1012 def updaterepo(repo, node, overwrite, updatecheck=None):
1011 def updaterepo(repo, node, overwrite, updatecheck=None):
1013 """Update the working directory to node.
1012 """Update the working directory to node.
1014
1013
1015 When overwrite is set, changes are clobbered; otherwise they are merged
1014 When overwrite is set, changes are clobbered; otherwise they are merged
1016
1015
1017 returns stats (see pydoc mercurial.merge.applyupdates)"""
1016 returns stats (see pydoc mercurial.merge.applyupdates)"""
1018 return mergemod.update(
1017 return mergemod.update(
1019 repo,
1018 repo,
1020 node,
1019 node,
1021 branchmerge=False,
1020 branchmerge=False,
1022 force=overwrite,
1021 force=overwrite,
1023 labels=[b'working copy', b'destination'],
1022 labels=[b'working copy', b'destination'],
1024 updatecheck=updatecheck,
1023 updatecheck=updatecheck,
1025 )
1024 )
1026
1025
1027
1026
1028 def update(repo, node, quietempty=False, updatecheck=None):
1027 def update(repo, node, quietempty=False, updatecheck=None):
1029 """update the working directory to node"""
1028 """update the working directory to node"""
1030 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
1029 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
1031 _showstats(repo, stats, quietempty)
1030 _showstats(repo, stats, quietempty)
1032 if stats.unresolvedcount:
1031 if stats.unresolvedcount:
1033 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1032 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1034 return stats.unresolvedcount > 0
1033 return stats.unresolvedcount > 0
1035
1034
1036
1035
1037 # naming conflict in clone()
1036 # naming conflict in clone()
1038 _update = update
1037 _update = update
1039
1038
1040
1039
1041 def clean(repo, node, show_stats=True, quietempty=False):
1040 def clean(repo, node, show_stats=True, quietempty=False):
1042 """forcibly switch the working directory to node, clobbering changes"""
1041 """forcibly switch the working directory to node, clobbering changes"""
1043 stats = updaterepo(repo, node, True)
1042 stats = updaterepo(repo, node, True)
1044 repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
1043 repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
1045 if show_stats:
1044 if show_stats:
1046 _showstats(repo, stats, quietempty)
1045 _showstats(repo, stats, quietempty)
1047 return stats.unresolvedcount > 0
1046 return stats.unresolvedcount > 0
1048
1047
1049
1048
1050 # naming conflict in updatetotally()
1049 # naming conflict in updatetotally()
1051 _clean = clean
1050 _clean = clean
1052
1051
1053 _VALID_UPDATECHECKS = {
1052 _VALID_UPDATECHECKS = {
1054 mergemod.UPDATECHECK_ABORT,
1053 mergemod.UPDATECHECK_ABORT,
1055 mergemod.UPDATECHECK_NONE,
1054 mergemod.UPDATECHECK_NONE,
1056 mergemod.UPDATECHECK_LINEAR,
1055 mergemod.UPDATECHECK_LINEAR,
1057 mergemod.UPDATECHECK_NO_CONFLICT,
1056 mergemod.UPDATECHECK_NO_CONFLICT,
1058 }
1057 }
1059
1058
1060
1059
1061 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1060 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1062 """Update the working directory with extra care for non-file components
1061 """Update the working directory with extra care for non-file components
1063
1062
1064 This takes care of non-file components below:
1063 This takes care of non-file components below:
1065
1064
1066 :bookmark: might be advanced or (in)activated
1065 :bookmark: might be advanced or (in)activated
1067
1066
1068 This takes arguments below:
1067 This takes arguments below:
1069
1068
1070 :checkout: to which revision the working directory is updated
1069 :checkout: to which revision the working directory is updated
1071 :brev: a name, which might be a bookmark to be activated after updating
1070 :brev: a name, which might be a bookmark to be activated after updating
1072 :clean: whether changes in the working directory can be discarded
1071 :clean: whether changes in the working directory can be discarded
1073 :updatecheck: how to deal with a dirty working directory
1072 :updatecheck: how to deal with a dirty working directory
1074
1073
1075 Valid values for updatecheck are the UPDATECHECK_* constants
1074 Valid values for updatecheck are the UPDATECHECK_* constants
1076 defined in the merge module. Passing `None` will result in using the
1075 defined in the merge module. Passing `None` will result in using the
1077 configured default.
1076 configured default.
1078
1077
1079 * ABORT: abort if the working directory is dirty
1078 * ABORT: abort if the working directory is dirty
1080 * NONE: don't check (merge working directory changes into destination)
1079 * NONE: don't check (merge working directory changes into destination)
1081 * LINEAR: check that update is linear before merging working directory
1080 * LINEAR: check that update is linear before merging working directory
1082 changes into destination
1081 changes into destination
1083 * NO_CONFLICT: check that the update does not result in file merges
1082 * NO_CONFLICT: check that the update does not result in file merges
1084
1083
1085 This returns whether conflict is detected at updating or not.
1084 This returns whether conflict is detected at updating or not.
1086 """
1085 """
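# Illustrative sketch, not part of this changeset: callers usually pass
# updatecheck=None, in which case the mode is read from the
# 'commands.update.check' config knob, e.g. in an hgrc:
#
#   [commands]
#   update.check = noconflict
#
# The recognized values ('abort', 'none', 'linear', 'noconflict') map to the
# UPDATECHECK_* constants checked below; an unrecognized configured value
# falls back to UPDATECHECK_LINEAR.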
1087 if updatecheck is None:
1086 if updatecheck is None:
1088 updatecheck = ui.config(b'commands', b'update.check')
1087 updatecheck = ui.config(b'commands', b'update.check')
1089 if updatecheck not in _VALID_UPDATECHECKS:
1088 if updatecheck not in _VALID_UPDATECHECKS:
1090 # If not configured, or invalid value configured
1089 # If not configured, or invalid value configured
1091 updatecheck = mergemod.UPDATECHECK_LINEAR
1090 updatecheck = mergemod.UPDATECHECK_LINEAR
1092 if updatecheck not in _VALID_UPDATECHECKS:
1091 if updatecheck not in _VALID_UPDATECHECKS:
1093 raise ValueError(
1092 raise ValueError(
1094 r'Invalid updatecheck value %r (can accept %r)'
1093 r'Invalid updatecheck value %r (can accept %r)'
1095 % (updatecheck, _VALID_UPDATECHECKS)
1094 % (updatecheck, _VALID_UPDATECHECKS)
1096 )
1095 )
1097 with repo.wlock():
1096 with repo.wlock():
1098 movemarkfrom = None
1097 movemarkfrom = None
1099 warndest = False
1098 warndest = False
1100 if checkout is None:
1099 if checkout is None:
1101 updata = destutil.destupdate(repo, clean=clean)
1100 updata = destutil.destupdate(repo, clean=clean)
1102 checkout, movemarkfrom, brev = updata
1101 checkout, movemarkfrom, brev = updata
1103 warndest = True
1102 warndest = True
1104
1103
1105 if clean:
1104 if clean:
1106 ret = _clean(repo, checkout)
1105 ret = _clean(repo, checkout)
1107 else:
1106 else:
1108 if updatecheck == mergemod.UPDATECHECK_ABORT:
1107 if updatecheck == mergemod.UPDATECHECK_ABORT:
1109 cmdutil.bailifchanged(repo, merge=False)
1108 cmdutil.bailifchanged(repo, merge=False)
1110 updatecheck = mergemod.UPDATECHECK_NONE
1109 updatecheck = mergemod.UPDATECHECK_NONE
1111 ret = _update(repo, checkout, updatecheck=updatecheck)
1110 ret = _update(repo, checkout, updatecheck=updatecheck)
1112
1111
1113 if not ret and movemarkfrom:
1112 if not ret and movemarkfrom:
1114 if movemarkfrom == repo[b'.'].node():
1113 if movemarkfrom == repo[b'.'].node():
1115 pass # no-op update
1114 pass # no-op update
1116 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1115 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1117 b = ui.label(repo._activebookmark, b'bookmarks.active')
1116 b = ui.label(repo._activebookmark, b'bookmarks.active')
1118 ui.status(_(b"updating bookmark %s\n") % b)
1117 ui.status(_(b"updating bookmark %s\n") % b)
1119 else:
1118 else:
1120 # this can happen with a non-linear update
1119 # this can happen with a non-linear update
1121 b = ui.label(repo._activebookmark, b'bookmarks')
1120 b = ui.label(repo._activebookmark, b'bookmarks')
1122 ui.status(_(b"(leaving bookmark %s)\n") % b)
1121 ui.status(_(b"(leaving bookmark %s)\n") % b)
1123 bookmarks.deactivate(repo)
1122 bookmarks.deactivate(repo)
1124 elif brev in repo._bookmarks:
1123 elif brev in repo._bookmarks:
1125 if brev != repo._activebookmark:
1124 if brev != repo._activebookmark:
1126 b = ui.label(brev, b'bookmarks.active')
1125 b = ui.label(brev, b'bookmarks.active')
1127 ui.status(_(b"(activating bookmark %s)\n") % b)
1126 ui.status(_(b"(activating bookmark %s)\n") % b)
1128 bookmarks.activate(repo, brev)
1127 bookmarks.activate(repo, brev)
1129 elif brev:
1128 elif brev:
1130 if repo._activebookmark:
1129 if repo._activebookmark:
1131 b = ui.label(repo._activebookmark, b'bookmarks')
1130 b = ui.label(repo._activebookmark, b'bookmarks')
1132 ui.status(_(b"(leaving bookmark %s)\n") % b)
1131 ui.status(_(b"(leaving bookmark %s)\n") % b)
1133 bookmarks.deactivate(repo)
1132 bookmarks.deactivate(repo)
1134
1133
1135 if warndest:
1134 if warndest:
1136 destutil.statusotherdests(ui, repo)
1135 destutil.statusotherdests(ui, repo)
1137
1136
1138 return ret
1137 return ret
1139
1138
1140
1139
1141 def merge(
1140 def merge(
1142 repo,
1141 repo,
1143 node,
1142 node,
1144 force=None,
1143 force=None,
1145 remind=True,
1144 remind=True,
1146 mergeforce=False,
1145 mergeforce=False,
1147 labels=None,
1146 labels=None,
1148 abort=False,
1147 abort=False,
1149 ):
1148 ):
1150 """Branch merge with node, resolving changes. Return true if any
1149 """Branch merge with node, resolving changes. Return true if any
1151 unresolved conflicts."""
1150 unresolved conflicts."""
1152 if abort:
1151 if abort:
1153 return abortmerge(repo.ui, repo)
1152 return abortmerge(repo.ui, repo)
1154
1153
1155 stats = mergemod.update(
1154 stats = mergemod.update(
1156 repo,
1155 repo,
1157 node,
1156 node,
1158 branchmerge=True,
1157 branchmerge=True,
1159 force=force,
1158 force=force,
1160 mergeforce=mergeforce,
1159 mergeforce=mergeforce,
1161 labels=labels,
1160 labels=labels,
1162 )
1161 )
1163 _showstats(repo, stats)
1162 _showstats(repo, stats)
1164 if stats.unresolvedcount:
1163 if stats.unresolvedcount:
1165 repo.ui.status(
1164 repo.ui.status(
1166 _(
1165 _(
1167 b"use 'hg resolve' to retry unresolved file merges "
1166 b"use 'hg resolve' to retry unresolved file merges "
1168 b"or 'hg merge --abort' to abandon\n"
1167 b"or 'hg merge --abort' to abandon\n"
1169 )
1168 )
1170 )
1169 )
1171 elif remind:
1170 elif remind:
1172 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1171 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1173 return stats.unresolvedcount > 0
1172 return stats.unresolvedcount > 0
1174
1173
1175
1174
1176 def abortmerge(ui, repo):
1175 def abortmerge(ui, repo):
1177 ms = mergemod.mergestate.read(repo)
1176 ms = mergemod.mergestate.read(repo)
1178 if ms.active():
1177 if ms.active():
1179 # there were conflicts
1178 # there were conflicts
1180 node = ms.localctx.hex()
1179 node = ms.localctx.hex()
1181 else:
1180 else:
1182 # there were no conflicts, mergestate was not stored
1181 # there were no conflicts, mergestate was not stored
1183 node = repo[b'.'].hex()
1182 node = repo[b'.'].hex()
1184
1183
1185 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1184 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1186 stats = mergemod.update(repo, node, branchmerge=False, force=True)
1185 stats = mergemod.update(repo, node, branchmerge=False, force=True)
1187 _showstats(repo, stats)
1186 _showstats(repo, stats)
1188 return stats.unresolvedcount > 0
1187 return stats.unresolvedcount > 0
1189
1188
1190
1189
1191 def _incoming(
1190 def _incoming(
1192 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1191 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1193 ):
1192 ):
1194 """
1193 """
1195 Helper for incoming / gincoming.
1194 Helper for incoming / gincoming.
1196 displaychlist gets called with
1195 displaychlist gets called with
1197 (remoterepo, incomingchangesetlist, displayer) parameters,
1196 (remoterepo, incomingchangesetlist, displayer) parameters,
1198 and is supposed to contain only code that can't be unified.
1197 and is supposed to contain only code that can't be unified.
1199 """
1198 """
1200 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1199 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1201 other = peer(repo, opts, source)
1200 other = peer(repo, opts, source)
1202 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1201 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1203 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1202 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1204
1203
1205 if revs:
1204 if revs:
1206 revs = [other.lookup(rev) for rev in revs]
1205 revs = [other.lookup(rev) for rev in revs]
1207 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1206 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1208 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1207 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1209 )
1208 )
1210 try:
1209 try:
1211 if not chlist:
1210 if not chlist:
1212 ui.status(_(b"no changes found\n"))
1211 ui.status(_(b"no changes found\n"))
1213 return subreporecurse()
1212 return subreporecurse()
1214 ui.pager(b'incoming')
1213 ui.pager(b'incoming')
1215 displayer = logcmdutil.changesetdisplayer(
1214 displayer = logcmdutil.changesetdisplayer(
1216 ui, other, opts, buffered=buffered
1215 ui, other, opts, buffered=buffered
1217 )
1216 )
1218 displaychlist(other, chlist, displayer)
1217 displaychlist(other, chlist, displayer)
1219 displayer.close()
1218 displayer.close()
1220 finally:
1219 finally:
1221 cleanupfn()
1220 cleanupfn()
1222 subreporecurse()
1221 subreporecurse()
1223 return 0 # exit code is zero since we found incoming changes
1222 return 0 # exit code is zero since we found incoming changes
1224
1223
1225
1224
1226 def incoming(ui, repo, source, opts):
1225 def incoming(ui, repo, source, opts):
1227 def subreporecurse():
1226 def subreporecurse():
1228 ret = 1
1227 ret = 1
1229 if opts.get(b'subrepos'):
1228 if opts.get(b'subrepos'):
1230 ctx = repo[None]
1229 ctx = repo[None]
1231 for subpath in sorted(ctx.substate):
1230 for subpath in sorted(ctx.substate):
1232 sub = ctx.sub(subpath)
1231 sub = ctx.sub(subpath)
1233 ret = min(ret, sub.incoming(ui, source, opts))
1232 ret = min(ret, sub.incoming(ui, source, opts))
1234 return ret
1233 return ret
1235
1234
1236 def display(other, chlist, displayer):
1235 def display(other, chlist, displayer):
1237 limit = logcmdutil.getlimit(opts)
1236 limit = logcmdutil.getlimit(opts)
1238 if opts.get(b'newest_first'):
1237 if opts.get(b'newest_first'):
1239 chlist.reverse()
1238 chlist.reverse()
1240 count = 0
1239 count = 0
1241 for n in chlist:
1240 for n in chlist:
1242 if limit is not None and count >= limit:
1241 if limit is not None and count >= limit:
1243 break
1242 break
1244 parents = [p for p in other.changelog.parents(n) if p != nullid]
1243 parents = [p for p in other.changelog.parents(n) if p != nullid]
1245 if opts.get(b'no_merges') and len(parents) == 2:
1244 if opts.get(b'no_merges') and len(parents) == 2:
1246 continue
1245 continue
1247 count += 1
1246 count += 1
1248 displayer.show(other[n])
1247 displayer.show(other[n])
1249
1248
1250 return _incoming(display, subreporecurse, ui, repo, source, opts)
1249 return _incoming(display, subreporecurse, ui, repo, source, opts)
1251
1250
1252
1251
1253 def _outgoing(ui, repo, dest, opts):
1252 def _outgoing(ui, repo, dest, opts):
1254 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1253 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1255 if not path:
1254 if not path:
1256 raise error.Abort(
1255 raise error.Abort(
1257 _(b'default repository not configured!'),
1256 _(b'default repository not configured!'),
1258 hint=_(b"see 'hg help config.paths'"),
1257 hint=_(b"see 'hg help config.paths'"),
1259 )
1258 )
1260 dest = path.pushloc or path.loc
1259 dest = path.pushloc or path.loc
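# Illustrative sketch, not part of this changeset: getpath() above prefers a
# configured 'default-push' path and falls back to 'default'. With an hgrc
# such as (assumed example values):
#
#   [paths]
#   default = https://hg.example.org/repo
#   default-push = ssh://hg@example.org/repo
#
# outgoing changesets are computed against the default-push location.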
1261 branches = path.branch, opts.get(b'branch') or []
1260 branches = path.branch, opts.get(b'branch') or []
1262
1261
1263 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1262 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1264 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1263 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1265 if revs:
1264 if revs:
1266 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1265 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1267
1266
1268 other = peer(repo, opts, dest)
1267 other = peer(repo, opts, dest)
1269 outgoing = discovery.findcommonoutgoing(
1268 outgoing = discovery.findcommonoutgoing(
1270 repo, other, revs, force=opts.get(b'force')
1269 repo, other, revs, force=opts.get(b'force')
1271 )
1270 )
1272 o = outgoing.missing
1271 o = outgoing.missing
1273 if not o:
1272 if not o:
1274 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1273 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1275 return o, other
1274 return o, other
1276
1275
1277
1276
1278 def outgoing(ui, repo, dest, opts):
1277 def outgoing(ui, repo, dest, opts):
1279 def recurse():
1278 def recurse():
1280 ret = 1
1279 ret = 1
1281 if opts.get(b'subrepos'):
1280 if opts.get(b'subrepos'):
1282 ctx = repo[None]
1281 ctx = repo[None]
1283 for subpath in sorted(ctx.substate):
1282 for subpath in sorted(ctx.substate):
1284 sub = ctx.sub(subpath)
1283 sub = ctx.sub(subpath)
1285 ret = min(ret, sub.outgoing(ui, dest, opts))
1284 ret = min(ret, sub.outgoing(ui, dest, opts))
1286 return ret
1285 return ret
1287
1286
1288 limit = logcmdutil.getlimit(opts)
1287 limit = logcmdutil.getlimit(opts)
1289 o, other = _outgoing(ui, repo, dest, opts)
1288 o, other = _outgoing(ui, repo, dest, opts)
1290 if not o:
1289 if not o:
1291 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1290 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1292 return recurse()
1291 return recurse()
1293
1292
1294 if opts.get(b'newest_first'):
1293 if opts.get(b'newest_first'):
1295 o.reverse()
1294 o.reverse()
1296 ui.pager(b'outgoing')
1295 ui.pager(b'outgoing')
1297 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1296 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1298 count = 0
1297 count = 0
1299 for n in o:
1298 for n in o:
1300 if limit is not None and count >= limit:
1299 if limit is not None and count >= limit:
1301 break
1300 break
1302 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1301 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1303 if opts.get(b'no_merges') and len(parents) == 2:
1302 if opts.get(b'no_merges') and len(parents) == 2:
1304 continue
1303 continue
1305 count += 1
1304 count += 1
1306 displayer.show(repo[n])
1305 displayer.show(repo[n])
1307 displayer.close()
1306 displayer.close()
1308 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1307 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1309 recurse()
1308 recurse()
1310 return 0 # exit code is zero since we found outgoing changes
1309 return 0 # exit code is zero since we found outgoing changes
1311
1310
1312
1311
1313 def verify(repo, level=None):
1312 def verify(repo, level=None):
1314 """verify the consistency of a repository"""
1313 """verify the consistency of a repository"""
1315 ret = verifymod.verify(repo, level=level)
1314 ret = verifymod.verify(repo, level=level)
1316
1315
1317 # Broken subrepo references in hidden csets don't seem worth worrying about,
1316 # Broken subrepo references in hidden csets don't seem worth worrying about,
1318 # since they can't be pushed/pulled, and --hidden can be used if they are a
1317 # since they can't be pushed/pulled, and --hidden can be used if they are a
1319 # concern.
1318 # concern.
1320
1319
1321 # pathto() is needed for -R case
1320 # pathto() is needed for -R case
1322 revs = repo.revs(
1321 revs = repo.revs(
1323 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1322 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1324 )
1323 )
1325
1324
1326 if revs:
1325 if revs:
1327 repo.ui.status(_(b'checking subrepo links\n'))
1326 repo.ui.status(_(b'checking subrepo links\n'))
1328 for rev in revs:
1327 for rev in revs:
1329 ctx = repo[rev]
1328 ctx = repo[rev]
1330 try:
1329 try:
1331 for subpath in ctx.substate:
1330 for subpath in ctx.substate:
1332 try:
1331 try:
1333 ret = (
1332 ret = (
1334 ctx.sub(subpath, allowcreate=False).verify() or ret
1333 ctx.sub(subpath, allowcreate=False).verify() or ret
1335 )
1334 )
1336 except error.RepoError as e:
1335 except error.RepoError as e:
1337 repo.ui.warn(b'%d: %s\n' % (rev, e))
1336 repo.ui.warn(b'%d: %s\n' % (rev, e))
1338 except Exception:
1337 except Exception:
1339 repo.ui.warn(
1338 repo.ui.warn(
1340 _(b'.hgsubstate is corrupt in revision %s\n')
1339 _(b'.hgsubstate is corrupt in revision %s\n')
1341 % node.short(ctx.node())
1340 % node.short(ctx.node())
1342 )
1341 )
1343
1342
1344 return ret
1343 return ret
1345
1344
1346
1345
1347 def remoteui(src, opts):
1346 def remoteui(src, opts):
1348 """build a remote ui from ui or repo and opts"""
1347 """build a remote ui from ui or repo and opts"""
1349 if util.safehasattr(src, b'baseui'): # looks like a repository
1348 if util.safehasattr(src, b'baseui'): # looks like a repository
1350 dst = src.baseui.copy() # drop repo-specific config
1349 dst = src.baseui.copy() # drop repo-specific config
1351 src = src.ui # copy target options from repo
1350 src = src.ui # copy target options from repo
1352 else: # assume it's a global ui object
1351 else: # assume it's a global ui object
1353 dst = src.copy() # keep all global options
1352 dst = src.copy() # keep all global options
1354
1353
1355 # copy ssh-specific options
1354 # copy ssh-specific options
1356 for o in b'ssh', b'remotecmd':
1355 for o in b'ssh', b'remotecmd':
1357 v = opts.get(o) or src.config(b'ui', o)
1356 v = opts.get(o) or src.config(b'ui', o)
1358 if v:
1357 if v:
1359 dst.setconfig(b"ui", o, v, b'copied')
1358 dst.setconfig(b"ui", o, v, b'copied')
1360
1359
1361 # copy bundle-specific options
1360 # copy bundle-specific options
1362 r = src.config(b'bundle', b'mainreporoot')
1361 r = src.config(b'bundle', b'mainreporoot')
1363 if r:
1362 if r:
1364 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1363 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1365
1364
1366 # copy selected local settings to the remote ui
1365 # copy selected local settings to the remote ui
1367 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1366 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1368 for key, val in src.configitems(sect):
1367 for key, val in src.configitems(sect):
1369 dst.setconfig(sect, key, val, b'copied')
1368 dst.setconfig(sect, key, val, b'copied')
1370 v = src.config(b'web', b'cacerts')
1369 v = src.config(b'web', b'cacerts')
1371 if v:
1370 if v:
1372 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1371 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1373
1372
1374 return dst
1373 return dst
1375
1374
1376
1375
1377 # Files of interest
1376 # Files of interest
1378 # Used to check if the repository has changed by looking at the mtime and size of
1377 # Used to check if the repository has changed by looking at the mtime and size of
1379 # these files.
1378 # these files.
1380 foi = [
1379 foi = [
1381 (b'spath', b'00changelog.i'),
1380 (b'spath', b'00changelog.i'),
1382 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1381 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1383 (b'spath', b'obsstore'),
1382 (b'spath', b'obsstore'),
1384 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1383 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1385 ]
1384 ]
1386
1385
1387
1386
1388 class cachedlocalrepo(object):
1387 class cachedlocalrepo(object):
1389 """Holds a localrepository that can be cached and reused."""
1388 """Holds a localrepository that can be cached and reused."""
1390
1389
1391 def __init__(self, repo):
1390 def __init__(self, repo):
1392 """Create a new cached repo from an existing repo.
1391 """Create a new cached repo from an existing repo.
1393
1392
1394 We assume the passed in repo was recently created. If the
1393 We assume the passed in repo was recently created. If the
1395 repo has changed between when it was created and when it was
1394 repo has changed between when it was created and when it was
1396 turned into a cache, it may not refresh properly.
1395 turned into a cache, it may not refresh properly.
1397 """
1396 """
1398 assert isinstance(repo, localrepo.localrepository)
1397 assert isinstance(repo, localrepo.localrepository)
1399 self._repo = repo
1398 self._repo = repo
1400 self._state, self.mtime = self._repostate()
1399 self._state, self.mtime = self._repostate()
1401 self._filtername = repo.filtername
1400 self._filtername = repo.filtername
1402
1401
1403 def fetch(self):
1402 def fetch(self):
1404 """Refresh (if necessary) and return a repository.
1403 """Refresh (if necessary) and return a repository.
1405
1404
1406 If the cached instance is out of date, it will be recreated
1405 If the cached instance is out of date, it will be recreated
1407 automatically and returned.
1406 automatically and returned.
1408
1407
1409 Returns a tuple of the repo and a boolean indicating whether a new
1408 Returns a tuple of the repo and a boolean indicating whether a new
1410 repo instance was created.
1409 repo instance was created.
1411 """
1410 """
1412 # We compare the mtimes and sizes of some well-known files to
1411 # We compare the mtimes and sizes of some well-known files to
1413 # determine if the repo changed. This is not precise, as mtimes
1412 # determine if the repo changed. This is not precise, as mtimes
1414 # are susceptible to clock skew and imprecise filesystems and
1413 # are susceptible to clock skew and imprecise filesystems and
1415 # file content can change while maintaining the same size.
1414 # file content can change while maintaining the same size.
1416
1415
1417 state, mtime = self._repostate()
1416 state, mtime = self._repostate()
1418 if state == self._state:
1417 if state == self._state:
1419 return self._repo, False
1418 return self._repo, False
1420
1419
1421 repo = repository(self._repo.baseui, self._repo.url())
1420 repo = repository(self._repo.baseui, self._repo.url())
1422 if self._filtername:
1421 if self._filtername:
1423 self._repo = repo.filtered(self._filtername)
1422 self._repo = repo.filtered(self._filtername)
1424 else:
1423 else:
1425 self._repo = repo.unfiltered()
1424 self._repo = repo.unfiltered()
1426 self._state = state
1425 self._state = state
1427 self.mtime = mtime
1426 self.mtime = mtime
1428
1427
1429 return self._repo, True
1428 return self._repo, True
1430
1429
1431 def _repostate(self):
1430 def _repostate(self):
1432 state = []
1431 state = []
1433 maxmtime = -1
1432 maxmtime = -1
1434 for attr, fname in foi:
1433 for attr, fname in foi:
1435 prefix = getattr(self._repo, attr)
1434 prefix = getattr(self._repo, attr)
1436 p = os.path.join(prefix, fname)
1435 p = os.path.join(prefix, fname)
1437 try:
1436 try:
1438 st = os.stat(p)
1437 st = os.stat(p)
1439 except OSError:
1438 except OSError:
1440 st = os.stat(prefix)
1439 st = os.stat(prefix)
1441 state.append((st[stat.ST_MTIME], st.st_size))
1440 state.append((st[stat.ST_MTIME], st.st_size))
1442 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1441 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1443
1442
1444 return tuple(state), maxmtime
1443 return tuple(state), maxmtime
1445
1444
1446 def copy(self):
1445 def copy(self):
1447 """Obtain a copy of this class instance.
1446 """Obtain a copy of this class instance.
1448
1447
1449 A new localrepository instance is obtained. The new instance should be
1448 A new localrepository instance is obtained. The new instance should be
1450 completely independent of the original.
1449 completely independent of the original.
1451 """
1450 """
1452 repo = repository(self._repo.baseui, self._repo.origroot)
1451 repo = repository(self._repo.baseui, self._repo.origroot)
1453 if self._filtername:
1452 if self._filtername:
1454 repo = repo.filtered(self._filtername)
1453 repo = repo.filtered(self._filtername)
1455 else:
1454 else:
1456 repo = repo.unfiltered()
1455 repo = repo.unfiltered()
1457 c = cachedlocalrepo(repo)
1456 c = cachedlocalrepo(repo)
1458 c._state = self._state
1457 c._state = self._state
1459 c.mtime = self.mtime
1458 c.mtime = self.mtime
1460 return c
1459 return c
@@ -1,3734 +1,3734 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
12 import os
11 import os
13 import random
12 import random
14 import sys
13 import sys
15 import time
14 import time
16 import weakref
15 import weakref
17
16
18 from .i18n import _
17 from .i18n import _
19 from .node import (
18 from .node import (
20 bin,
19 bin,
21 hex,
20 hex,
22 nullid,
21 nullid,
23 nullrev,
22 nullrev,
24 short,
23 short,
25 )
24 )
26 from .pycompat import (
25 from .pycompat import (
27 delattr,
26 delattr,
28 getattr,
27 getattr,
29 )
28 )
30 from . import (
29 from . import (
31 bookmarks,
30 bookmarks,
32 branchmap,
31 branchmap,
33 bundle2,
32 bundle2,
34 changegroup,
33 changegroup,
35 color,
34 color,
36 context,
35 context,
37 dirstate,
36 dirstate,
38 dirstateguard,
37 dirstateguard,
39 discovery,
38 discovery,
40 encoding,
39 encoding,
41 error,
40 error,
42 exchange,
41 exchange,
43 extensions,
42 extensions,
44 filelog,
43 filelog,
45 hook,
44 hook,
46 lock as lockmod,
45 lock as lockmod,
47 match as matchmod,
46 match as matchmod,
48 merge as mergemod,
47 merge as mergemod,
49 mergeutil,
48 mergeutil,
50 namespaces,
49 namespaces,
51 narrowspec,
50 narrowspec,
52 obsolete,
51 obsolete,
53 pathutil,
52 pathutil,
54 phases,
53 phases,
55 pushkey,
54 pushkey,
56 pycompat,
55 pycompat,
57 repoview,
56 repoview,
58 revset,
57 revset,
59 revsetlang,
58 revsetlang,
60 scmutil,
59 scmutil,
61 sparse,
60 sparse,
62 store as storemod,
61 store as storemod,
63 subrepoutil,
62 subrepoutil,
64 tags as tagsmod,
63 tags as tagsmod,
65 transaction,
64 transaction,
66 txnutil,
65 txnutil,
67 util,
66 util,
68 vfs as vfsmod,
67 vfs as vfsmod,
69 )
68 )
70
69
71 from .interfaces import (
70 from .interfaces import (
72 repository,
71 repository,
73 util as interfaceutil,
72 util as interfaceutil,
74 )
73 )
75
74
76 from .utils import (
75 from .utils import (
76 hashutil,
77 procutil,
77 procutil,
78 stringutil,
78 stringutil,
79 )
79 )
80
80
81 from .revlogutils import constants as revlogconst
81 from .revlogutils import constants as revlogconst
82
82
83 release = lockmod.release
83 release = lockmod.release
84 urlerr = util.urlerr
84 urlerr = util.urlerr
85 urlreq = util.urlreq
85 urlreq = util.urlreq
86
86
87 # set of (path, vfs-location) tuples. vfs-location is:
87 # set of (path, vfs-location) tuples. vfs-location is:
88 # - 'plain' for vfs relative paths
88 # - 'plain' for vfs relative paths
89 # - '' for svfs relative paths
89 # - '' for svfs relative paths
90 _cachedfiles = set()
90 _cachedfiles = set()
91
91
92
92
93 class _basefilecache(scmutil.filecache):
93 class _basefilecache(scmutil.filecache):
94 """All filecache usage on a repo is done for logic that should be unfiltered
94 """All filecache usage on a repo is done for logic that should be unfiltered
95 """
95 """
96
96
97 def __get__(self, repo, type=None):
97 def __get__(self, repo, type=None):
98 if repo is None:
98 if repo is None:
99 return self
99 return self
100 # proxy to unfiltered __dict__ since filtered repo has no entry
100 # proxy to unfiltered __dict__ since filtered repo has no entry
101 unfi = repo.unfiltered()
101 unfi = repo.unfiltered()
102 try:
102 try:
103 return unfi.__dict__[self.sname]
103 return unfi.__dict__[self.sname]
104 except KeyError:
104 except KeyError:
105 pass
105 pass
106 return super(_basefilecache, self).__get__(unfi, type)
106 return super(_basefilecache, self).__get__(unfi, type)
107
107
108 def set(self, repo, value):
108 def set(self, repo, value):
109 return super(_basefilecache, self).set(repo.unfiltered(), value)
109 return super(_basefilecache, self).set(repo.unfiltered(), value)
110
110
111
111
112 class repofilecache(_basefilecache):
112 class repofilecache(_basefilecache):
113 """filecache for files in .hg but outside of .hg/store"""
113 """filecache for files in .hg but outside of .hg/store"""
114
114
115 def __init__(self, *paths):
115 def __init__(self, *paths):
116 super(repofilecache, self).__init__(*paths)
116 super(repofilecache, self).__init__(*paths)
117 for path in paths:
117 for path in paths:
118 _cachedfiles.add((path, b'plain'))
118 _cachedfiles.add((path, b'plain'))
119
119
120 def join(self, obj, fname):
120 def join(self, obj, fname):
121 return obj.vfs.join(fname)
121 return obj.vfs.join(fname)
122
122
123
123
124 class storecache(_basefilecache):
124 class storecache(_basefilecache):
125 """filecache for files in the store"""
125 """filecache for files in the store"""
126
126
127 def __init__(self, *paths):
127 def __init__(self, *paths):
128 super(storecache, self).__init__(*paths)
128 super(storecache, self).__init__(*paths)
129 for path in paths:
129 for path in paths:
130 _cachedfiles.add((path, b''))
130 _cachedfiles.add((path, b''))
131
131
132 def join(self, obj, fname):
132 def join(self, obj, fname):
133 return obj.sjoin(fname)
133 return obj.sjoin(fname)
134
134
135
135
136 class mixedrepostorecache(_basefilecache):
136 class mixedrepostorecache(_basefilecache):
137 """filecache for a mix of files in .hg/store and outside"""
137 """filecache for a mix of files in .hg/store and outside"""
138
138
139 def __init__(self, *pathsandlocations):
139 def __init__(self, *pathsandlocations):
140 # scmutil.filecache only uses the path for passing back into our
140 # scmutil.filecache only uses the path for passing back into our
141 # join(), so we can safely pass a list of paths and locations
141 # join(), so we can safely pass a list of paths and locations
142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 _cachedfiles.update(pathsandlocations)
143 _cachedfiles.update(pathsandlocations)
144
144
145 def join(self, obj, fnameandlocation):
145 def join(self, obj, fnameandlocation):
146 fname, location = fnameandlocation
146 fname, location = fnameandlocation
147 if location == b'plain':
147 if location == b'plain':
148 return obj.vfs.join(fname)
148 return obj.vfs.join(fname)
149 else:
149 else:
150 if location != b'':
150 if location != b'':
151 raise error.ProgrammingError(
151 raise error.ProgrammingError(
152 b'unexpected location: %s' % location
152 b'unexpected location: %s' % location
153 )
153 )
154 return obj.sjoin(fname)
154 return obj.sjoin(fname)
155
155
156
156
157 def isfilecached(repo, name):
157 def isfilecached(repo, name):
158 """check if a repo has already cached the "name" filecache-ed property
158 """check if a repo has already cached the "name" filecache-ed property
159
159
160 This returns (cachedobj-or-None, iscached) tuple.
160 This returns (cachedobj-or-None, iscached) tuple.
161 """
161 """
162 cacheentry = repo.unfiltered()._filecache.get(name, None)
162 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 if not cacheentry:
163 if not cacheentry:
164 return None, False
164 return None, False
165 return cacheentry.obj, True
165 return cacheentry.obj, True
166
166
167
167
168 class unfilteredpropertycache(util.propertycache):
168 class unfilteredpropertycache(util.propertycache):
169 """propertycache that applies to the unfiltered repo only"""
169 """propertycache that applies to the unfiltered repo only"""
170
170
171 def __get__(self, repo, type=None):
171 def __get__(self, repo, type=None):
172 unfi = repo.unfiltered()
172 unfi = repo.unfiltered()
173 if unfi is repo:
173 if unfi is repo:
174 return super(unfilteredpropertycache, self).__get__(unfi)
174 return super(unfilteredpropertycache, self).__get__(unfi)
175 return getattr(unfi, self.name)
175 return getattr(unfi, self.name)
176
176
177
177
178 class filteredpropertycache(util.propertycache):
178 class filteredpropertycache(util.propertycache):
179 """propertycache that must take filtering into account"""
179 """propertycache that must take filtering into account"""
180
180
181 def cachevalue(self, obj, value):
181 def cachevalue(self, obj, value):
182 object.__setattr__(obj, self.name, value)
182 object.__setattr__(obj, self.name, value)
183
183
184
184
185 def hasunfilteredcache(repo, name):
185 def hasunfilteredcache(repo, name):
186 """check if a repo has an unfilteredpropertycache value for <name>"""
186 """check if a repo has an unfilteredpropertycache value for <name>"""
187 return name in vars(repo.unfiltered())
187 return name in vars(repo.unfiltered())
188
188
189
189
190 def unfilteredmethod(orig):
190 def unfilteredmethod(orig):
191 """decorate a method that always needs to be run on the unfiltered version"""
191 """decorate a method that always needs to be run on the unfiltered version"""
192
192
193 def wrapper(repo, *args, **kwargs):
193 def wrapper(repo, *args, **kwargs):
194 return orig(repo.unfiltered(), *args, **kwargs)
194 return orig(repo.unfiltered(), *args, **kwargs)
195
195
196 return wrapper
196 return wrapper
197
197
198
198
199 moderncaps = {
199 moderncaps = {
200 b'lookup',
200 b'lookup',
201 b'branchmap',
201 b'branchmap',
202 b'pushkey',
202 b'pushkey',
203 b'known',
203 b'known',
204 b'getbundle',
204 b'getbundle',
205 b'unbundle',
205 b'unbundle',
206 }
206 }
207 legacycaps = moderncaps.union({b'changegroupsubset'})
207 legacycaps = moderncaps.union({b'changegroupsubset'})
208
208
209
209
210 @interfaceutil.implementer(repository.ipeercommandexecutor)
210 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 class localcommandexecutor(object):
211 class localcommandexecutor(object):
212 def __init__(self, peer):
212 def __init__(self, peer):
213 self._peer = peer
213 self._peer = peer
214 self._sent = False
214 self._sent = False
215 self._closed = False
215 self._closed = False
216
216
217 def __enter__(self):
217 def __enter__(self):
218 return self
218 return self
219
219
220 def __exit__(self, exctype, excvalue, exctb):
220 def __exit__(self, exctype, excvalue, exctb):
221 self.close()
221 self.close()
222
222
223 def callcommand(self, command, args):
223 def callcommand(self, command, args):
224 if self._sent:
224 if self._sent:
225 raise error.ProgrammingError(
225 raise error.ProgrammingError(
226 b'callcommand() cannot be used after sendcommands()'
226 b'callcommand() cannot be used after sendcommands()'
227 )
227 )
228
228
229 if self._closed:
229 if self._closed:
230 raise error.ProgrammingError(
230 raise error.ProgrammingError(
231 b'callcommand() cannot be used after close()'
231 b'callcommand() cannot be used after close()'
232 )
232 )
233
233
234 # We don't need to support anything fancy. Just call the named
234 # We don't need to support anything fancy. Just call the named
235 # method on the peer and return a resolved future.
235 # method on the peer and return a resolved future.
236 fn = getattr(self._peer, pycompat.sysstr(command))
236 fn = getattr(self._peer, pycompat.sysstr(command))
237
237
238 f = pycompat.futures.Future()
238 f = pycompat.futures.Future()
239
239
240 try:
240 try:
241 result = fn(**pycompat.strkwargs(args))
241 result = fn(**pycompat.strkwargs(args))
242 except Exception:
242 except Exception:
243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 else:
244 else:
245 f.set_result(result)
245 f.set_result(result)
246
246
247 return f
247 return f
248
248
249 def sendcommands(self):
249 def sendcommands(self):
250 self._sent = True
250 self._sent = True
251
251
252 def close(self):
252 def close(self):
253 self._closed = True
253 self._closed = True
254
254
255
255
256 @interfaceutil.implementer(repository.ipeercommands)
256 @interfaceutil.implementer(repository.ipeercommands)
257 class localpeer(repository.peer):
257 class localpeer(repository.peer):
258 '''peer for a local repo; reflects only the most recent API'''
258 '''peer for a local repo; reflects only the most recent API'''
259
259
260 def __init__(self, repo, caps=None):
260 def __init__(self, repo, caps=None):
261 super(localpeer, self).__init__()
261 super(localpeer, self).__init__()
262
262
263 if caps is None:
263 if caps is None:
264 caps = moderncaps.copy()
264 caps = moderncaps.copy()
265 self._repo = repo.filtered(b'served')
265 self._repo = repo.filtered(b'served')
266 self.ui = repo.ui
266 self.ui = repo.ui
267 self._caps = repo._restrictcapabilities(caps)
267 self._caps = repo._restrictcapabilities(caps)
268
268
269 # Begin of _basepeer interface.
269 # Begin of _basepeer interface.
270
270
271 def url(self):
271 def url(self):
272 return self._repo.url()
272 return self._repo.url()
273
273
274 def local(self):
274 def local(self):
275 return self._repo
275 return self._repo
276
276
277 def peer(self):
277 def peer(self):
278 return self
278 return self
279
279
280 def canpush(self):
280 def canpush(self):
281 return True
281 return True
282
282
283 def close(self):
283 def close(self):
284 self._repo.close()
284 self._repo.close()
285
285
286 # End of _basepeer interface.
286 # End of _basepeer interface.
287
287
288 # Begin of _basewirecommands interface.
288 # Begin of _basewirecommands interface.
289
289
290 def branchmap(self):
290 def branchmap(self):
291 return self._repo.branchmap()
291 return self._repo.branchmap()
292
292
293 def capabilities(self):
293 def capabilities(self):
294 return self._caps
294 return self._caps
295
295
296 def clonebundles(self):
296 def clonebundles(self):
297 return self._repo.tryread(b'clonebundles.manifest')
297 return self._repo.tryread(b'clonebundles.manifest')
298
298
299 def debugwireargs(self, one, two, three=None, four=None, five=None):
299 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 """Used to test argument passing over the wire"""
300 """Used to test argument passing over the wire"""
301 return b"%s %s %s %s %s" % (
301 return b"%s %s %s %s %s" % (
302 one,
302 one,
303 two,
303 two,
304 pycompat.bytestr(three),
304 pycompat.bytestr(three),
305 pycompat.bytestr(four),
305 pycompat.bytestr(four),
306 pycompat.bytestr(five),
306 pycompat.bytestr(five),
307 )
307 )
308
308
309 def getbundle(
309 def getbundle(
310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 ):
311 ):
312 chunks = exchange.getbundlechunks(
312 chunks = exchange.getbundlechunks(
313 self._repo,
313 self._repo,
314 source,
314 source,
315 heads=heads,
315 heads=heads,
316 common=common,
316 common=common,
317 bundlecaps=bundlecaps,
317 bundlecaps=bundlecaps,
318 **kwargs
318 **kwargs
319 )[1]
319 )[1]
320 cb = util.chunkbuffer(chunks)
320 cb = util.chunkbuffer(chunks)
321
321
322 if exchange.bundle2requested(bundlecaps):
322 if exchange.bundle2requested(bundlecaps):
323 # When requesting a bundle2, getbundle returns a stream to make the
323 # When requesting a bundle2, getbundle returns a stream to make the
324 # wire-level function happier. We need to build a proper object
324 # wire-level function happier. We need to build a proper object
325 # from it in the local peer.
325 # from it in the local peer.
326 return bundle2.getunbundler(self.ui, cb)
326 return bundle2.getunbundler(self.ui, cb)
327 else:
327 else:
328 return changegroup.getunbundler(b'01', cb, None)
328 return changegroup.getunbundler(b'01', cb, None)
329
329
330 def heads(self):
330 def heads(self):
331 return self._repo.heads()
331 return self._repo.heads()
332
332
333 def known(self, nodes):
333 def known(self, nodes):
334 return self._repo.known(nodes)
334 return self._repo.known(nodes)
335
335
336 def listkeys(self, namespace):
336 def listkeys(self, namespace):
337 return self._repo.listkeys(namespace)
337 return self._repo.listkeys(namespace)
338
338
339 def lookup(self, key):
339 def lookup(self, key):
340 return self._repo.lookup(key)
340 return self._repo.lookup(key)
341
341
342 def pushkey(self, namespace, key, old, new):
342 def pushkey(self, namespace, key, old, new):
343 return self._repo.pushkey(namespace, key, old, new)
343 return self._repo.pushkey(namespace, key, old, new)
344
344
345 def stream_out(self):
345 def stream_out(self):
346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347
347
348 def unbundle(self, bundle, heads, url):
348 def unbundle(self, bundle, heads, url):
349 """apply a bundle on a repo
349 """apply a bundle on a repo
350
350
351 This function handles the repo locking itself."""
351 This function handles the repo locking itself."""
352 try:
352 try:
353 try:
353 try:
354 bundle = exchange.readbundle(self.ui, bundle, None)
354 bundle = exchange.readbundle(self.ui, bundle, None)
355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 if util.safehasattr(ret, b'getchunks'):
356 if util.safehasattr(ret, b'getchunks'):
357 # This is a bundle20 object, turn it into an unbundler.
357 # This is a bundle20 object, turn it into an unbundler.
358 # This little dance should be dropped eventually when the
358 # This little dance should be dropped eventually when the
359 # API is finally improved.
359 # API is finally improved.
360 stream = util.chunkbuffer(ret.getchunks())
360 stream = util.chunkbuffer(ret.getchunks())
361 ret = bundle2.getunbundler(self.ui, stream)
361 ret = bundle2.getunbundler(self.ui, stream)
362 return ret
362 return ret
363 except Exception as exc:
363 except Exception as exc:
364 # If the exception contains output salvaged from a bundle2
364 # If the exception contains output salvaged from a bundle2
365 # reply, we need to make sure it is printed before continuing
365 # reply, we need to make sure it is printed before continuing
366 # to fail. So we build a bundle2 with such output and consume
366 # to fail. So we build a bundle2 with such output and consume
367 # it directly.
367 # it directly.
368 #
368 #
369 # This is not very elegant but allows a "simple" solution for
369 # This is not very elegant but allows a "simple" solution for
370 # issue4594
370 # issue4594
371 output = getattr(exc, '_bundle2salvagedoutput', ())
371 output = getattr(exc, '_bundle2salvagedoutput', ())
372 if output:
372 if output:
373 bundler = bundle2.bundle20(self._repo.ui)
373 bundler = bundle2.bundle20(self._repo.ui)
374 for out in output:
374 for out in output:
375 bundler.addpart(out)
375 bundler.addpart(out)
376 stream = util.chunkbuffer(bundler.getchunks())
376 stream = util.chunkbuffer(bundler.getchunks())
377 b = bundle2.getunbundler(self.ui, stream)
377 b = bundle2.getunbundler(self.ui, stream)
378 bundle2.processbundle(self._repo, b)
378 bundle2.processbundle(self._repo, b)
379 raise
379 raise
380 except error.PushRaced as exc:
380 except error.PushRaced as exc:
381 raise error.ResponseError(
381 raise error.ResponseError(
382 _(b'push failed:'), stringutil.forcebytestr(exc)
382 _(b'push failed:'), stringutil.forcebytestr(exc)
383 )
383 )
384
384
385 # End of _basewirecommands interface.
385 # End of _basewirecommands interface.
386
386
387 # Begin of peer interface.
387 # Begin of peer interface.
388
388
389 def commandexecutor(self):
389 def commandexecutor(self):
390 return localcommandexecutor(self)
390 return localcommandexecutor(self)
391
391
392 # End of peer interface.
392 # End of peer interface.
393
393
394
394
395 @interfaceutil.implementer(repository.ipeerlegacycommands)
395 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 class locallegacypeer(localpeer):
396 class locallegacypeer(localpeer):
397 '''peer extension which implements legacy methods too; used for tests with
397 '''peer extension which implements legacy methods too; used for tests with
398 restricted capabilities'''
398 restricted capabilities'''
399
399
400 def __init__(self, repo):
400 def __init__(self, repo):
401 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
401 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402
402
403 # Begin of baselegacywirecommands interface.
403 # Begin of baselegacywirecommands interface.
404
404
405 def between(self, pairs):
405 def between(self, pairs):
406 return self._repo.between(pairs)
406 return self._repo.between(pairs)
407
407
408 def branches(self, nodes):
408 def branches(self, nodes):
409 return self._repo.branches(nodes)
409 return self._repo.branches(nodes)
410
410
411 def changegroup(self, nodes, source):
411 def changegroup(self, nodes, source):
412 outgoing = discovery.outgoing(
412 outgoing = discovery.outgoing(
413 self._repo, missingroots=nodes, missingheads=self._repo.heads()
413 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 )
414 )
415 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
415 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416
416
417 def changegroupsubset(self, bases, heads, source):
417 def changegroupsubset(self, bases, heads, source):
418 outgoing = discovery.outgoing(
418 outgoing = discovery.outgoing(
419 self._repo, missingroots=bases, missingheads=heads
419 self._repo, missingroots=bases, missingheads=heads
420 )
420 )
421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422
422
423 # End of baselegacywirecommands interface.
423 # End of baselegacywirecommands interface.
424
424
425
425
426 # Increment the sub-version when the revlog v2 format changes to lock out old
426 # Increment the sub-version when the revlog v2 format changes to lock out old
427 # clients.
427 # clients.
428 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
428 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429
429
430 # A repository with the sparserevlog feature will have delta chains that
430 # A repository with the sparserevlog feature will have delta chains that
431 # can spread over a larger span. Sparse reading cuts these large spans into
431 # can spread over a larger span. Sparse reading cuts these large spans into
432 # pieces, so that each piece isn't too big.
432 # pieces, so that each piece isn't too big.
433 # Without the sparserevlog capability, reading from the repository could use
433 # Without the sparserevlog capability, reading from the repository could use
434 # huge amounts of memory, because the whole span would be read at once,
434 # huge amounts of memory, because the whole span would be read at once,
435 # including all the intermediate revisions that aren't pertinent for the chain.
435 # including all the intermediate revisions that aren't pertinent for the chain.
436 # This is why once a repository has enabled sparse-read, it becomes required.
436 # This is why once a repository has enabled sparse-read, it becomes required.
437 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
437 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438
438
439 # A repository with the sidedataflag requirement will allow storing extra
439 # A repository with the sidedataflag requirement will allow storing extra
440 # information for revisions without altering their original hashes.
440 # information for revisions without altering their original hashes.
441 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
441 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442
442
443 # A repository with the copies-sidedata-changeset requirement will store
443 # A repository with the copies-sidedata-changeset requirement will store
444 # copies-related information in the changeset's sidedata.
444 # copies-related information in the changeset's sidedata.
445 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
445 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446
446
447 # Functions receiving (ui, features) that extensions can register to impact
447 # Functions receiving (ui, features) that extensions can register to impact
448 # the ability to load repositories with custom requirements. Only
448 # the ability to load repositories with custom requirements. Only
449 # functions defined in loaded extensions are called.
449 # functions defined in loaded extensions are called.
450 #
450 #
451 # The function receives a set of requirement strings that the repository
451 # The function receives a set of requirement strings that the repository
452 # is capable of opening. Functions will typically add elements to the
452 # is capable of opening. Functions will typically add elements to the
453 # set to reflect that the extension knows how to handle those requirements.
453 # set to reflect that the extension knows how to handle those requirements.
454 featuresetupfuncs = set()
454 featuresetupfuncs = set()
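As a rough sketch of how an extension typically registers such a function (the requirement name is invented for illustration):

    from mercurial import localrepo

    def featuresetup(ui, supported):
        # tell this Mercurial that repos carrying our requirement can be opened
        supported.add(b'exp-myext-requirement')  # hypothetical requirement

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)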
455
455
456
456
457 def makelocalrepository(baseui, path, intents=None):
457 def makelocalrepository(baseui, path, intents=None):
458 """Create a local repository object.
458 """Create a local repository object.
459
459
460 Given arguments needed to construct a local repository, this function
460 Given arguments needed to construct a local repository, this function
461 performs various early repository loading functionality (such as
461 performs various early repository loading functionality (such as
462 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
462 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 the repository can be opened, derives a type suitable for representing
463 the repository can be opened, derives a type suitable for representing
464 that repository, and returns an instance of it.
464 that repository, and returns an instance of it.
465
465
466 The returned object conforms to the ``repository.completelocalrepository``
466 The returned object conforms to the ``repository.completelocalrepository``
467 interface.
467 interface.
468
468
469 The repository type is derived by calling a series of factory functions
469 The repository type is derived by calling a series of factory functions
470 for each aspect/interface of the final repository. These are defined by
470 for each aspect/interface of the final repository. These are defined by
471 ``REPO_INTERFACES``.
471 ``REPO_INTERFACES``.
472
472
473 Each factory function is called to produce a type implementing a specific
473 Each factory function is called to produce a type implementing a specific
474 interface. The cumulative list of returned types will be combined into a
474 interface. The cumulative list of returned types will be combined into a
475 new type and that type will be instantiated to represent the local
475 new type and that type will be instantiated to represent the local
476 repository.
476 repository.
477
477
478 The factory functions each receive various state that may be consulted
478 The factory functions each receive various state that may be consulted
479 as part of deriving a type.
479 as part of deriving a type.
480
480
481 Extensions should wrap these factory functions to customize repository type
481 Extensions should wrap these factory functions to customize repository type
482 creation. Note that an extension's wrapped function may be called even if
482 creation. Note that an extension's wrapped function may be called even if
483 that extension is not loaded for the repo being constructed. Extensions
483 that extension is not loaded for the repo being constructed. Extensions
484 should check if their ``__name__`` appears in the
484 should check if their ``__name__`` appears in the
485 ``extensionmodulenames`` set passed to the factory function and no-op if
485 ``extensionmodulenames`` set passed to the factory function and no-op if
486 not.
486 not.
487 """
487 """
488 ui = baseui.copy()
488 ui = baseui.copy()
489 # Prevent copying repo configuration.
489 # Prevent copying repo configuration.
490 ui.copy = baseui.copy
490 ui.copy = baseui.copy
491
491
492 # Working directory VFS rooted at repository root.
492 # Working directory VFS rooted at repository root.
493 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
493 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494
494
495 # Main VFS for .hg/ directory.
495 # Main VFS for .hg/ directory.
496 hgpath = wdirvfs.join(b'.hg')
496 hgpath = wdirvfs.join(b'.hg')
497 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
497 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498
498
499 # The .hg/ path should exist and should be a directory. All other
499 # The .hg/ path should exist and should be a directory. All other
500 # cases are errors.
500 # cases are errors.
501 if not hgvfs.isdir():
501 if not hgvfs.isdir():
502 try:
502 try:
503 hgvfs.stat()
503 hgvfs.stat()
504 except OSError as e:
504 except OSError as e:
505 if e.errno != errno.ENOENT:
505 if e.errno != errno.ENOENT:
506 raise
506 raise
507
507
508 raise error.RepoError(_(b'repository %s not found') % path)
508 raise error.RepoError(_(b'repository %s not found') % path)
509
509
510 # .hg/requires file contains a newline-delimited list of
510 # .hg/requires file contains a newline-delimited list of
511 # features/capabilities the opener (us) must have in order to use
511 # features/capabilities the opener (us) must have in order to use
512 # the repository. This file was introduced in Mercurial 0.9.2,
512 # the repository. This file was introduced in Mercurial 0.9.2,
513 # which means very old repositories may not have one. We assume
513 # which means very old repositories may not have one. We assume
514 # a missing file translates to no requirements.
514 # a missing file translates to no requirements.
515 try:
515 try:
516 requirements = set(hgvfs.read(b'requires').splitlines())
516 requirements = set(hgvfs.read(b'requires').splitlines())
517 except IOError as e:
517 except IOError as e:
518 if e.errno != errno.ENOENT:
518 if e.errno != errno.ENOENT:
519 raise
519 raise
520 requirements = set()
520 requirements = set()
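For orientation, a typical modern ``.hg/requires`` file holds a handful of short feature names, one per line; the exact set depends on the Mercurial version and the options used when the repository was created, but it often looks roughly like:

    dotencode
    fncache
    generaldelta
    revlogv1
    sparserevlog
    store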
521
521
522 # The .hg/hgrc file may load extensions or contain config options
522 # The .hg/hgrc file may load extensions or contain config options
523 # that influence repository construction. Attempt to load it and
523 # that influence repository construction. Attempt to load it and
524 # process any new extensions that it may have pulled in.
524 # process any new extensions that it may have pulled in.
525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 extensions.loadall(ui)
527 extensions.loadall(ui)
528 extensions.populateui(ui)
528 extensions.populateui(ui)
529
529
530 # Set of module names of extensions loaded for this repository.
530 # Set of module names of extensions loaded for this repository.
531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532
532
533 supportedrequirements = gathersupportedrequirements(ui)
533 supportedrequirements = gathersupportedrequirements(ui)
534
534
535 # We first validate the requirements are known.
535 # We first validate the requirements are known.
536 ensurerequirementsrecognized(requirements, supportedrequirements)
536 ensurerequirementsrecognized(requirements, supportedrequirements)
537
537
538 # Then we validate that the known set is reasonable to use together.
538 # Then we validate that the known set is reasonable to use together.
539 ensurerequirementscompatible(ui, requirements)
539 ensurerequirementscompatible(ui, requirements)
540
540
541 # TODO there are unhandled edge cases related to opening repositories with
541 # TODO there are unhandled edge cases related to opening repositories with
542 # shared storage. If storage is shared, we should also test for requirements
542 # shared storage. If storage is shared, we should also test for requirements
543 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
543 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 # that repo, as that repo may load extensions needed to open it. This is a
544 # that repo, as that repo may load extensions needed to open it. This is a
545 # bit complicated because we don't want the other hgrc to overwrite settings
545 # bit complicated because we don't want the other hgrc to overwrite settings
546 # in this hgrc.
546 # in this hgrc.
547 #
547 #
548 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
548 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 # file when sharing repos. But if a requirement is added after the share is
549 # file when sharing repos. But if a requirement is added after the share is
550 # performed, thereby introducing a new requirement for the opener, we may
550 # performed, thereby introducing a new requirement for the opener, we may
551 # not see that and could encounter a run-time error interacting with
551 # not see that and could encounter a run-time error interacting with
552 # that shared store since it has an unknown-to-us requirement.
552 # that shared store since it has an unknown-to-us requirement.
553
553
554 # At this point, we know we should be capable of opening the repository.
554 # At this point, we know we should be capable of opening the repository.
555 # Now get on with doing that.
555 # Now get on with doing that.
556
556
557 features = set()
557 features = set()
558
558
559 # The "store" part of the repository holds versioned data. How it is
559 # The "store" part of the repository holds versioned data. How it is
560 # accessed is determined by various requirements. The ``shared`` or
560 # accessed is determined by various requirements. The ``shared`` or
561 # ``relshared`` requirements indicate the store lives in the path contained
561 # ``relshared`` requirements indicate the store lives in the path contained
562 # in the ``.hg/sharedpath`` file. This is an absolute path for
562 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 # ``shared`` and relative to ``.hg/`` for ``relshared``.
563 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 if b'shared' in requirements or b'relshared' in requirements:
564 if b'shared' in requirements or b'relshared' in requirements:
565 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
565 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 if b'relshared' in requirements:
566 if b'relshared' in requirements:
567 sharedpath = hgvfs.join(sharedpath)
567 sharedpath = hgvfs.join(sharedpath)
568
568
569 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
569 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570
570
571 if not sharedvfs.exists():
571 if not sharedvfs.exists():
572 raise error.RepoError(
572 raise error.RepoError(
573 _(b'.hg/sharedpath points to nonexistent directory %s')
573 _(b'.hg/sharedpath points to nonexistent directory %s')
574 % sharedvfs.base
574 % sharedvfs.base
575 )
575 )
576
576
577 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
577 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578
578
579 storebasepath = sharedvfs.base
579 storebasepath = sharedvfs.base
580 cachepath = sharedvfs.join(b'cache')
580 cachepath = sharedvfs.join(b'cache')
581 else:
581 else:
582 storebasepath = hgvfs.base
582 storebasepath = hgvfs.base
583 cachepath = hgvfs.join(b'cache')
583 cachepath = hgvfs.join(b'cache')
584 wcachepath = hgvfs.join(b'wcache')
584 wcachepath = hgvfs.join(b'wcache')
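As a concrete illustration of the two modes (paths hypothetical): with the ``shared`` requirement, ``.hg/sharedpath`` holds an absolute path such as ``/srv/hg/main/.hg`` that is used as-is; with ``relshared`` it holds something like ``../main/.hg``, which is first joined onto this repository's ``.hg/`` directory.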
585
585
586 # The store has changed over time and the exact layout is dictated by
586 # The store has changed over time and the exact layout is dictated by
587 # requirements. The store interface abstracts differences across all
587 # requirements. The store interface abstracts differences across all
588 # of them.
588 # of them.
589 store = makestore(
589 store = makestore(
590 requirements,
590 requirements,
591 storebasepath,
591 storebasepath,
592 lambda base: vfsmod.vfs(base, cacheaudited=True),
592 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 )
593 )
594 hgvfs.createmode = store.createmode
594 hgvfs.createmode = store.createmode
595
595
596 storevfs = store.vfs
596 storevfs = store.vfs
597 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
597 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598
598
599 # The cache vfs is used to manage cache files.
599 # The cache vfs is used to manage cache files.
600 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
600 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 cachevfs.createmode = store.createmode
601 cachevfs.createmode = store.createmode
602 # The cache vfs is used to manage cache files related to the working copy
602 # The cache vfs is used to manage cache files related to the working copy
603 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
603 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 wcachevfs.createmode = store.createmode
604 wcachevfs.createmode = store.createmode
605
605
606 # Now resolve the type for the repository object. We do this by repeatedly
606 # Now resolve the type for the repository object. We do this by repeatedly
607 # calling a factory function to produce types for specific aspects of the
607 # calling a factory function to produce types for specific aspects of the
608 # repo's operation. The aggregate returned types are used as base classes
608 # repo's operation. The aggregate returned types are used as base classes
609 # for a dynamically-derived type, which will represent our new repository.
609 # for a dynamically-derived type, which will represent our new repository.
610
610
611 bases = []
611 bases = []
612 extrastate = {}
612 extrastate = {}
613
613
614 for iface, fn in REPO_INTERFACES:
614 for iface, fn in REPO_INTERFACES:
615 # We pass all potentially useful state to give extensions tons of
615 # We pass all potentially useful state to give extensions tons of
616 # flexibility.
616 # flexibility.
617 typ = fn()(
617 typ = fn()(
618 ui=ui,
618 ui=ui,
619 intents=intents,
619 intents=intents,
620 requirements=requirements,
620 requirements=requirements,
621 features=features,
621 features=features,
622 wdirvfs=wdirvfs,
622 wdirvfs=wdirvfs,
623 hgvfs=hgvfs,
623 hgvfs=hgvfs,
624 store=store,
624 store=store,
625 storevfs=storevfs,
625 storevfs=storevfs,
626 storeoptions=storevfs.options,
626 storeoptions=storevfs.options,
627 cachevfs=cachevfs,
627 cachevfs=cachevfs,
628 wcachevfs=wcachevfs,
628 wcachevfs=wcachevfs,
629 extensionmodulenames=extensionmodulenames,
629 extensionmodulenames=extensionmodulenames,
630 extrastate=extrastate,
630 extrastate=extrastate,
631 baseclasses=bases,
631 baseclasses=bases,
632 )
632 )
633
633
634 if not isinstance(typ, type):
634 if not isinstance(typ, type):
635 raise error.ProgrammingError(
635 raise error.ProgrammingError(
636 b'unable to construct type for %s' % iface
636 b'unable to construct type for %s' % iface
637 )
637 )
638
638
639 bases.append(typ)
639 bases.append(typ)
640
640
641 # type() allows you to use characters in type names that wouldn't be
641 # type() allows you to use characters in type names that wouldn't be
642 # recognized as Python symbols in source code. We abuse that to add
642 # recognized as Python symbols in source code. We abuse that to add
643 # rich information about our constructed repo.
643 # rich information about our constructed repo.
644 name = pycompat.sysstr(
644 name = pycompat.sysstr(
645 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
645 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 )
646 )
647
647
648 cls = type(name, tuple(bases), {})
648 cls = type(name, tuple(bases), {})
649
649
650 return cls(
650 return cls(
651 baseui=baseui,
651 baseui=baseui,
652 ui=ui,
652 ui=ui,
653 origroot=path,
653 origroot=path,
654 wdirvfs=wdirvfs,
654 wdirvfs=wdirvfs,
655 hgvfs=hgvfs,
655 hgvfs=hgvfs,
656 requirements=requirements,
656 requirements=requirements,
657 supportedrequirements=supportedrequirements,
657 supportedrequirements=supportedrequirements,
658 sharedpath=storebasepath,
658 sharedpath=storebasepath,
659 store=store,
659 store=store,
660 cachevfs=cachevfs,
660 cachevfs=cachevfs,
661 wcachevfs=wcachevfs,
661 wcachevfs=wcachevfs,
662 features=features,
662 features=features,
663 intents=intents,
663 intents=intents,
664 )
664 )
665
665
666
666
667 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
667 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 """Load hgrc files/content into a ui instance.
668 """Load hgrc files/content into a ui instance.
669
669
670 This is called during repository opening to load any additional
670 This is called during repository opening to load any additional
671 config files or settings relevant to the current repository.
671 config files or settings relevant to the current repository.
672
672
673 Returns a bool indicating whether any additional configs were loaded.
673 Returns a bool indicating whether any additional configs were loaded.
674
674
675 Extensions should monkeypatch this function to modify how per-repo
675 Extensions should monkeypatch this function to modify how per-repo
676 configs are loaded. For example, an extension may wish to pull in
676 configs are loaded. For example, an extension may wish to pull in
677 configs from alternate files or sources.
677 configs from alternate files or sources.
678 """
678 """
679 try:
679 try:
680 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
680 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
681 return True
681 return True
682 except IOError:
682 except IOError:
683 return False
683 return False
684
684
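A rough sketch of the kind of wrapper an extension might install here, assuming it wants to read an extra, hypothetical ``hgrc-extra`` file in addition to the normal one:

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            # hypothetical extra per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            loaded = True
        except IOError:
            pass
        return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)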
685
685
686 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
686 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
687 """Perform additional actions after .hg/hgrc is loaded.
687 """Perform additional actions after .hg/hgrc is loaded.
688
688
689 This function is called during repository loading immediately after
689 This function is called during repository loading immediately after
690 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
690 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
691
691
692 The function can be used to validate configs, automatically add
692 The function can be used to validate configs, automatically add
693 options (including extensions) based on requirements, etc.
693 options (including extensions) based on requirements, etc.
694 """
694 """
695
695
696 # Map of requirements to list of extensions to load automatically when
696 # Map of requirements to list of extensions to load automatically when
697 # requirement is present.
697 # requirement is present.
698 autoextensions = {
698 autoextensions = {
699 b'largefiles': [b'largefiles'],
699 b'largefiles': [b'largefiles'],
700 b'lfs': [b'lfs'],
700 b'lfs': [b'lfs'],
701 }
701 }
702
702
703 for requirement, names in sorted(autoextensions.items()):
703 for requirement, names in sorted(autoextensions.items()):
704 if requirement not in requirements:
704 if requirement not in requirements:
705 continue
705 continue
706
706
707 for name in names:
707 for name in names:
708 if not ui.hasconfig(b'extensions', name):
708 if not ui.hasconfig(b'extensions', name):
709 ui.setconfig(b'extensions', name, b'', source=b'autoload')
709 ui.setconfig(b'extensions', name, b'', source=b'autoload')
710
710
711
711
712 def gathersupportedrequirements(ui):
712 def gathersupportedrequirements(ui):
713 """Determine the complete set of recognized requirements."""
713 """Determine the complete set of recognized requirements."""
714 # Start with all requirements supported by this file.
714 # Start with all requirements supported by this file.
715 supported = set(localrepository._basesupported)
715 supported = set(localrepository._basesupported)
716
716
717 # Execute ``featuresetupfuncs`` entries if they belong to an extension
717 # Execute ``featuresetupfuncs`` entries if they belong to an extension
718 # relevant to this ui instance.
718 # relevant to this ui instance.
719 modules = {m.__name__ for n, m in extensions.extensions(ui)}
719 modules = {m.__name__ for n, m in extensions.extensions(ui)}
720
720
721 for fn in featuresetupfuncs:
721 for fn in featuresetupfuncs:
722 if fn.__module__ in modules:
722 if fn.__module__ in modules:
723 fn(ui, supported)
723 fn(ui, supported)
724
724
725 # Add derived requirements from registered compression engines.
725 # Add derived requirements from registered compression engines.
726 for name in util.compengines:
726 for name in util.compengines:
727 engine = util.compengines[name]
727 engine = util.compengines[name]
728 if engine.available() and engine.revlogheader():
728 if engine.available() and engine.revlogheader():
729 supported.add(b'exp-compression-%s' % name)
729 supported.add(b'exp-compression-%s' % name)
730 if engine.name() == b'zstd':
730 if engine.name() == b'zstd':
731 supported.add(b'revlog-compression-zstd')
731 supported.add(b'revlog-compression-zstd')
732
732
733 return supported
733 return supported
734
734
735
735
736 def ensurerequirementsrecognized(requirements, supported):
736 def ensurerequirementsrecognized(requirements, supported):
737 """Validate that a set of local requirements is recognized.
737 """Validate that a set of local requirements is recognized.
738
738
739 Receives a set of requirements. Raises an ``error.RepoError`` if there
739 Receives a set of requirements. Raises an ``error.RepoError`` if there
740 exists any requirement in that set that currently loaded code doesn't
740 exists any requirement in that set that currently loaded code doesn't
741 recognize.
741 recognize.
742
742
743 Returns a set of supported requirements.
743 Returns a set of supported requirements.
744 """
744 """
745 missing = set()
745 missing = set()
746
746
747 for requirement in requirements:
747 for requirement in requirements:
748 if requirement in supported:
748 if requirement in supported:
749 continue
749 continue
750
750
751 if not requirement or not requirement[0:1].isalnum():
751 if not requirement or not requirement[0:1].isalnum():
752 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
752 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
753
753
754 missing.add(requirement)
754 missing.add(requirement)
755
755
756 if missing:
756 if missing:
757 raise error.RequirementError(
757 raise error.RequirementError(
758 _(b'repository requires features unknown to this Mercurial: %s')
758 _(b'repository requires features unknown to this Mercurial: %s')
759 % b' '.join(sorted(missing)),
759 % b' '.join(sorted(missing)),
760 hint=_(
760 hint=_(
761 b'see https://mercurial-scm.org/wiki/MissingRequirement '
761 b'see https://mercurial-scm.org/wiki/MissingRequirement '
762 b'for more information'
762 b'for more information'
763 ),
763 ),
764 )
764 )
765
765
766
766
767 def ensurerequirementscompatible(ui, requirements):
767 def ensurerequirementscompatible(ui, requirements):
768 """Validates that a set of recognized requirements is mutually compatible.
768 """Validates that a set of recognized requirements is mutually compatible.
769
769
770 Some requirements may not be compatible with others or require
770 Some requirements may not be compatible with others or require
771 config options that aren't enabled. This function is called during
771 config options that aren't enabled. This function is called during
772 repository opening to ensure that the set of requirements needed
772 repository opening to ensure that the set of requirements needed
773 to open a repository is sane and compatible with config options.
773 to open a repository is sane and compatible with config options.
774
774
775 Extensions can monkeypatch this function to perform additional
775 Extensions can monkeypatch this function to perform additional
776 checking.
776 checking.
777
777
778 ``error.RepoError`` should be raised on failure.
778 ``error.RepoError`` should be raised on failure.
779 """
779 """
780 if b'exp-sparse' in requirements and not sparse.enabled:
780 if b'exp-sparse' in requirements and not sparse.enabled:
781 raise error.RepoError(
781 raise error.RepoError(
782 _(
782 _(
783 b'repository is using sparse feature but '
783 b'repository is using sparse feature but '
784 b'sparse is not enabled; enable the '
784 b'sparse is not enabled; enable the '
785 b'"sparse" extensions to access'
785 b'"sparse" extensions to access'
786 )
786 )
787 )
787 )
788
788
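A sketch of how an extension could hook in an additional check, per the monkeypatching note above (the requirement and config names are invented for illustration):

    from mercurial import error, extensions, localrepo
    from mercurial.i18n import _

    def _compatible(orig, ui, requirements):
        orig(ui, requirements)
        if b'exp-myfeature' in requirements and not ui.configbool(b'myext', b'enabled'):
            raise error.RepoError(
                _(b'enable the myext extension to use this repository')
            )

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo, 'ensurerequirementscompatible', _compatible
        )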
789
789
790 def makestore(requirements, path, vfstype):
790 def makestore(requirements, path, vfstype):
791 """Construct a storage object for a repository."""
791 """Construct a storage object for a repository."""
792 if b'store' in requirements:
792 if b'store' in requirements:
793 if b'fncache' in requirements:
793 if b'fncache' in requirements:
794 return storemod.fncachestore(
794 return storemod.fncachestore(
795 path, vfstype, b'dotencode' in requirements
795 path, vfstype, b'dotencode' in requirements
796 )
796 )
797
797
798 return storemod.encodedstore(path, vfstype)
798 return storemod.encodedstore(path, vfstype)
799
799
800 return storemod.basicstore(path, vfstype)
800 return storemod.basicstore(path, vfstype)
801
801
802
802
803 def resolvestorevfsoptions(ui, requirements, features):
803 def resolvestorevfsoptions(ui, requirements, features):
804 """Resolve the options to pass to the store vfs opener.
804 """Resolve the options to pass to the store vfs opener.
805
805
806 The returned dict is used to influence behavior of the storage layer.
806 The returned dict is used to influence behavior of the storage layer.
807 """
807 """
808 options = {}
808 options = {}
809
809
810 if b'treemanifest' in requirements:
810 if b'treemanifest' in requirements:
811 options[b'treemanifest'] = True
811 options[b'treemanifest'] = True
812
812
813 # experimental config: format.manifestcachesize
813 # experimental config: format.manifestcachesize
814 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
814 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
815 if manifestcachesize is not None:
815 if manifestcachesize is not None:
816 options[b'manifestcachesize'] = manifestcachesize
816 options[b'manifestcachesize'] = manifestcachesize
817
817
818 # In the absence of another requirement superseding a revlog-related
818 # In the absence of another requirement superseding a revlog-related
819 # requirement, we have to assume the repo is using revlog version 0.
819 # requirement, we have to assume the repo is using revlog version 0.
820 # This revlog format is super old and we don't bother trying to parse
820 # This revlog format is super old and we don't bother trying to parse
821 # opener options for it because those options wouldn't do anything
821 # opener options for it because those options wouldn't do anything
822 # meaningful on such old repos.
822 # meaningful on such old repos.
823 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
823 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
824 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
824 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
825 else: # explicitly mark repo as using revlogv0
825 else: # explicitly mark repo as using revlogv0
826 options[b'revlogv0'] = True
826 options[b'revlogv0'] = True
827
827
828 if COPIESSDC_REQUIREMENT in requirements:
828 if COPIESSDC_REQUIREMENT in requirements:
829 options[b'copies-storage'] = b'changeset-sidedata'
829 options[b'copies-storage'] = b'changeset-sidedata'
830 else:
830 else:
831 writecopiesto = ui.config(b'experimental', b'copies.write-to')
831 writecopiesto = ui.config(b'experimental', b'copies.write-to')
832 copiesextramode = (b'changeset-only', b'compatibility')
832 copiesextramode = (b'changeset-only', b'compatibility')
833 if writecopiesto in copiesextramode:
833 if writecopiesto in copiesextramode:
834 options[b'copies-storage'] = b'extra'
834 options[b'copies-storage'] = b'extra'
835
835
836 return options
836 return options
837
837
838
838
839 def resolverevlogstorevfsoptions(ui, requirements, features):
839 def resolverevlogstorevfsoptions(ui, requirements, features):
840 """Resolve opener options specific to revlogs."""
840 """Resolve opener options specific to revlogs."""
841
841
842 options = {}
842 options = {}
843 options[b'flagprocessors'] = {}
843 options[b'flagprocessors'] = {}
844
844
845 if b'revlogv1' in requirements:
845 if b'revlogv1' in requirements:
846 options[b'revlogv1'] = True
846 options[b'revlogv1'] = True
847 if REVLOGV2_REQUIREMENT in requirements:
847 if REVLOGV2_REQUIREMENT in requirements:
848 options[b'revlogv2'] = True
848 options[b'revlogv2'] = True
849
849
850 if b'generaldelta' in requirements:
850 if b'generaldelta' in requirements:
851 options[b'generaldelta'] = True
851 options[b'generaldelta'] = True
852
852
853 # experimental config: format.chunkcachesize
853 # experimental config: format.chunkcachesize
854 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
854 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
855 if chunkcachesize is not None:
855 if chunkcachesize is not None:
856 options[b'chunkcachesize'] = chunkcachesize
856 options[b'chunkcachesize'] = chunkcachesize
857
857
858 deltabothparents = ui.configbool(
858 deltabothparents = ui.configbool(
859 b'storage', b'revlog.optimize-delta-parent-choice'
859 b'storage', b'revlog.optimize-delta-parent-choice'
860 )
860 )
861 options[b'deltabothparents'] = deltabothparents
861 options[b'deltabothparents'] = deltabothparents
862
862
863 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
863 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
864 lazydeltabase = False
864 lazydeltabase = False
865 if lazydelta:
865 if lazydelta:
866 lazydeltabase = ui.configbool(
866 lazydeltabase = ui.configbool(
867 b'storage', b'revlog.reuse-external-delta-parent'
867 b'storage', b'revlog.reuse-external-delta-parent'
868 )
868 )
869 if lazydeltabase is None:
869 if lazydeltabase is None:
870 lazydeltabase = not scmutil.gddeltaconfig(ui)
870 lazydeltabase = not scmutil.gddeltaconfig(ui)
871 options[b'lazydelta'] = lazydelta
871 options[b'lazydelta'] = lazydelta
872 options[b'lazydeltabase'] = lazydeltabase
872 options[b'lazydeltabase'] = lazydeltabase
873
873
874 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
874 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
875 if 0 <= chainspan:
875 if 0 <= chainspan:
876 options[b'maxdeltachainspan'] = chainspan
876 options[b'maxdeltachainspan'] = chainspan
877
877
878 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
878 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
879 if mmapindexthreshold is not None:
879 if mmapindexthreshold is not None:
880 options[b'mmapindexthreshold'] = mmapindexthreshold
880 options[b'mmapindexthreshold'] = mmapindexthreshold
881
881
882 withsparseread = ui.configbool(b'experimental', b'sparse-read')
882 withsparseread = ui.configbool(b'experimental', b'sparse-read')
883 srdensitythres = float(
883 srdensitythres = float(
884 ui.config(b'experimental', b'sparse-read.density-threshold')
884 ui.config(b'experimental', b'sparse-read.density-threshold')
885 )
885 )
886 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
886 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
887 options[b'with-sparse-read'] = withsparseread
887 options[b'with-sparse-read'] = withsparseread
888 options[b'sparse-read-density-threshold'] = srdensitythres
888 options[b'sparse-read-density-threshold'] = srdensitythres
889 options[b'sparse-read-min-gap-size'] = srmingapsize
889 options[b'sparse-read-min-gap-size'] = srmingapsize
890
890
891 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
891 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
892 options[b'sparse-revlog'] = sparserevlog
892 options[b'sparse-revlog'] = sparserevlog
893 if sparserevlog:
893 if sparserevlog:
894 options[b'generaldelta'] = True
894 options[b'generaldelta'] = True
895
895
896 sidedata = SIDEDATA_REQUIREMENT in requirements
896 sidedata = SIDEDATA_REQUIREMENT in requirements
897 options[b'side-data'] = sidedata
897 options[b'side-data'] = sidedata
898
898
899 maxchainlen = None
899 maxchainlen = None
900 if sparserevlog:
900 if sparserevlog:
901 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
901 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
902 # experimental config: format.maxchainlen
902 # experimental config: format.maxchainlen
903 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
903 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
904 if maxchainlen is not None:
904 if maxchainlen is not None:
905 options[b'maxchainlen'] = maxchainlen
905 options[b'maxchainlen'] = maxchainlen
906
906
907 for r in requirements:
907 for r in requirements:
908 # we allow multiple compression engine requirements to co-exist because
908 # we allow multiple compression engine requirements to co-exist because
909 # strictly speaking, revlog seems to support mixed compression styles.
909 # strictly speaking, revlog seems to support mixed compression styles.
910 #
910 #
911 # The compression used for new entries will be "the last one"
911 # The compression used for new entries will be "the last one"
912 prefix = r.startswith
912 prefix = r.startswith
913 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
913 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
914 options[b'compengine'] = r.split(b'-', 2)[2]
914 options[b'compengine'] = r.split(b'-', 2)[2]
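For example, a requirement of ``revlog-compression-zstd`` yields ``r.split(b'-', 2)[2] == b'zstd'``, which becomes the ``compengine`` option.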
915
915
916 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
916 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
917 if options[b'zlib.level'] is not None:
917 if options[b'zlib.level'] is not None:
918 if not (0 <= options[b'zlib.level'] <= 9):
918 if not (0 <= options[b'zlib.level'] <= 9):
919 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
919 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
920 raise error.Abort(msg % options[b'zlib.level'])
920 raise error.Abort(msg % options[b'zlib.level'])
921 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
921 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
922 if options[b'zstd.level'] is not None:
922 if options[b'zstd.level'] is not None:
923 if not (0 <= options[b'zstd.level'] <= 22):
923 if not (0 <= options[b'zstd.level'] <= 22):
924 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
924 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
925 raise error.Abort(msg % options[b'zstd.level'])
925 raise error.Abort(msg % options[b'zstd.level'])
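For reference, these two options correspond to hgrc settings along these lines (values illustrative):

    [storage]
    revlog.zlib.level = 6
    revlog.zstd.level = 3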
926
926
927 if repository.NARROW_REQUIREMENT in requirements:
927 if repository.NARROW_REQUIREMENT in requirements:
928 options[b'enableellipsis'] = True
928 options[b'enableellipsis'] = True
929
929
930 if ui.configbool(b'experimental', b'rust.index'):
930 if ui.configbool(b'experimental', b'rust.index'):
931 options[b'rust.index'] = True
931 options[b'rust.index'] = True
932
932
933 return options
933 return options
934
934
935
935
936 def makemain(**kwargs):
936 def makemain(**kwargs):
937 """Produce a type conforming to ``ilocalrepositorymain``."""
937 """Produce a type conforming to ``ilocalrepositorymain``."""
938 return localrepository
938 return localrepository
939
939
940
940
941 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
941 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
942 class revlogfilestorage(object):
942 class revlogfilestorage(object):
943 """File storage when using revlogs."""
943 """File storage when using revlogs."""
944
944
945 def file(self, path):
945 def file(self, path):
946 if path[0] == b'/':
946 if path[0] == b'/':
947 path = path[1:]
947 path = path[1:]
948
948
949 return filelog.filelog(self.svfs, path)
949 return filelog.filelog(self.svfs, path)
950
950
951
951
952 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
952 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
953 class revlognarrowfilestorage(object):
953 class revlognarrowfilestorage(object):
954 """File storage when using revlogs and narrow files."""
954 """File storage when using revlogs and narrow files."""
955
955
956 def file(self, path):
956 def file(self, path):
957 if path[0] == b'/':
957 if path[0] == b'/':
958 path = path[1:]
958 path = path[1:]
959
959
960 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
960 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
961
961
962
962
963 def makefilestorage(requirements, features, **kwargs):
963 def makefilestorage(requirements, features, **kwargs):
964 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
964 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
965 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
965 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
966 features.add(repository.REPO_FEATURE_STREAM_CLONE)
966 features.add(repository.REPO_FEATURE_STREAM_CLONE)
967
967
968 if repository.NARROW_REQUIREMENT in requirements:
968 if repository.NARROW_REQUIREMENT in requirements:
969 return revlognarrowfilestorage
969 return revlognarrowfilestorage
970 else:
970 else:
971 return revlogfilestorage
971 return revlogfilestorage
972
972
973
973
974 # List of repository interfaces and factory functions for them. Each
974 # List of repository interfaces and factory functions for them. Each
975 # will be called in order during ``makelocalrepository()`` to iteratively
975 # will be called in order during ``makelocalrepository()`` to iteratively
976 # derive the final type for a local repository instance. We capture the
976 # derive the final type for a local repository instance. We capture the
977 # function as a lambda so we don't hold a reference and the module-level
977 # function as a lambda so we don't hold a reference and the module-level
978 # functions can be wrapped.
978 # functions can be wrapped.
979 REPO_INTERFACES = [
979 REPO_INTERFACES = [
980 (repository.ilocalrepositorymain, lambda: makemain),
980 (repository.ilocalrepositorymain, lambda: makemain),
981 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
981 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
982 ]
982 ]
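Because the list stores lambdas that look the factories up at call time, an extension can influence the derived repository type simply by wrapping a module-level factory; a rough sketch (the mixin and its behaviour are hypothetical):

    from mercurial import extensions, localrepo

    class _auditingfilestorage(object):
        def file(self, path):
            # hypothetical extra behaviour before delegating to the real storage
            return super(_auditingfilestorage, self).file(path)

    def _makefilestorage(orig, *args, **kwargs):
        base = orig(*args, **kwargs)
        return type('auditingfilestorage', (_auditingfilestorage, base), {})

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)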
983
983
984
984
985 @interfaceutil.implementer(repository.ilocalrepositorymain)
985 @interfaceutil.implementer(repository.ilocalrepositorymain)
986 class localrepository(object):
986 class localrepository(object):
987 """Main class for representing local repositories.
987 """Main class for representing local repositories.
988
988
989 All local repositories are instances of this class.
989 All local repositories are instances of this class.
990
990
991 Constructed on its own, instances of this class are not usable as
991 Constructed on its own, instances of this class are not usable as
992 repository objects. To obtain a usable repository object, call
992 repository objects. To obtain a usable repository object, call
993 ``hg.repository()``, ``localrepo.instance()``, or
993 ``hg.repository()``, ``localrepo.instance()``, or
994 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
994 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
995 ``instance()`` adds support for creating new repositories.
995 ``instance()`` adds support for creating new repositories.
996 ``hg.repository()`` adds more extension integration, including calling
996 ``hg.repository()`` adds more extension integration, including calling
997 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
997 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
998 used.
998 used.
999 """
999 """
1000
1000
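A minimal usage sketch of the recommended entry point, assuming an existing repository at a hypothetical path:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')  # hypothetical path
    print(repo.requirements)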
1001 # obsolete experimental requirements:
1001 # obsolete experimental requirements:
1002 # - manifestv2: An experimental new manifest format that allowed
1002 # - manifestv2: An experimental new manifest format that allowed
1003 # for stem compression of long paths. Experiment ended up not
1003 # for stem compression of long paths. Experiment ended up not
1004 # being successful (repository sizes went up due to worse delta
1004 # being successful (repository sizes went up due to worse delta
1005 # chains), and the code was deleted in 4.6.
1005 # chains), and the code was deleted in 4.6.
1006 supportedformats = {
1006 supportedformats = {
1007 b'revlogv1',
1007 b'revlogv1',
1008 b'generaldelta',
1008 b'generaldelta',
1009 b'treemanifest',
1009 b'treemanifest',
1010 COPIESSDC_REQUIREMENT,
1010 COPIESSDC_REQUIREMENT,
1011 REVLOGV2_REQUIREMENT,
1011 REVLOGV2_REQUIREMENT,
1012 SIDEDATA_REQUIREMENT,
1012 SIDEDATA_REQUIREMENT,
1013 SPARSEREVLOG_REQUIREMENT,
1013 SPARSEREVLOG_REQUIREMENT,
1014 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1014 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1015 }
1015 }
1016 _basesupported = supportedformats | {
1016 _basesupported = supportedformats | {
1017 b'store',
1017 b'store',
1018 b'fncache',
1018 b'fncache',
1019 b'shared',
1019 b'shared',
1020 b'relshared',
1020 b'relshared',
1021 b'dotencode',
1021 b'dotencode',
1022 b'exp-sparse',
1022 b'exp-sparse',
1023 b'internal-phase',
1023 b'internal-phase',
1024 }
1024 }
1025
1025
1026 # list of prefixes of files which can be written without 'wlock'
1026 # list of prefixes of files which can be written without 'wlock'
1027 # Extensions should extend this list when needed
1027 # Extensions should extend this list when needed
1028 _wlockfreeprefix = {
1028 _wlockfreeprefix = {
1029 # We might consider requiring 'wlock' for the next
1029 # We might consider requiring 'wlock' for the next
1030 # two, but pretty much all the existing code assume
1030 # two, but pretty much all the existing code assume
1031 # wlock is not needed so we keep them excluded for
1031 # wlock is not needed so we keep them excluded for
1032 # now.
1032 # now.
1033 b'hgrc',
1033 b'hgrc',
1034 b'requires',
1034 b'requires',
1035 # XXX cache is a complicated business; someone
1035 # XXX cache is a complicated business; someone
1036 # should investigate this in depth at some point
1036 # should investigate this in depth at some point
1037 b'cache/',
1037 b'cache/',
1038 # XXX shouldn't dirstate be covered by the wlock?
1038 # XXX shouldn't dirstate be covered by the wlock?
1039 b'dirstate',
1039 b'dirstate',
1040 # XXX bisect was still a bit too messy at the time
1040 # XXX bisect was still a bit too messy at the time
1041 # this changeset was introduced. Someone should fix
1041 # this changeset was introduced. Someone should fix
1042 # the remaining bit and drop this line
1042 # the remaining bit and drop this line
1043 b'bisect.state',
1043 b'bisect.state',
1044 }
1044 }
1045
1045
1046 def __init__(
1046 def __init__(
1047 self,
1047 self,
1048 baseui,
1048 baseui,
1049 ui,
1049 ui,
1050 origroot,
1050 origroot,
1051 wdirvfs,
1051 wdirvfs,
1052 hgvfs,
1052 hgvfs,
1053 requirements,
1053 requirements,
1054 supportedrequirements,
1054 supportedrequirements,
1055 sharedpath,
1055 sharedpath,
1056 store,
1056 store,
1057 cachevfs,
1057 cachevfs,
1058 wcachevfs,
1058 wcachevfs,
1059 features,
1059 features,
1060 intents=None,
1060 intents=None,
1061 ):
1061 ):
1062 """Create a new local repository instance.
1062 """Create a new local repository instance.
1063
1063
1064 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1064 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1065 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1065 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1066 object.
1066 object.
1067
1067
1068 Arguments:
1068 Arguments:
1069
1069
1070 baseui
1070 baseui
1071 ``ui.ui`` instance that ``ui`` argument was based off of.
1071 ``ui.ui`` instance that ``ui`` argument was based off of.
1072
1072
1073 ui
1073 ui
1074 ``ui.ui`` instance for use by the repository.
1074 ``ui.ui`` instance for use by the repository.
1075
1075
1076 origroot
1076 origroot
1077 ``bytes`` path to working directory root of this repository.
1077 ``bytes`` path to working directory root of this repository.
1078
1078
1079 wdirvfs
1079 wdirvfs
1080 ``vfs.vfs`` rooted at the working directory.
1080 ``vfs.vfs`` rooted at the working directory.
1081
1081
1082 hgvfs
1082 hgvfs
1083 ``vfs.vfs`` rooted at .hg/
1083 ``vfs.vfs`` rooted at .hg/
1084
1084
1085 requirements
1085 requirements
1086 ``set`` of bytestrings representing repository opening requirements.
1086 ``set`` of bytestrings representing repository opening requirements.
1087
1087
1088 supportedrequirements
1088 supportedrequirements
1089 ``set`` of bytestrings representing repository requirements that we
1089 ``set`` of bytestrings representing repository requirements that we
1090 know how to open. May be a superset of ``requirements``.
1090 know how to open. May be a superset of ``requirements``.
1091
1091
1092 sharedpath
1092 sharedpath
1093 ``bytes`` defining the path to the storage base directory. Points to a
1093 ``bytes`` defining the path to the storage base directory. Points to a
1094 ``.hg/`` directory somewhere.
1094 ``.hg/`` directory somewhere.
1095
1095
1096 store
1096 store
1097 ``store.basicstore`` (or derived) instance providing access to
1097 ``store.basicstore`` (or derived) instance providing access to
1098 versioned storage.
1098 versioned storage.
1099
1099
1100 cachevfs
1100 cachevfs
1101 ``vfs.vfs`` used for cache files.
1101 ``vfs.vfs`` used for cache files.
1102
1102
1103 wcachevfs
1103 wcachevfs
1104 ``vfs.vfs`` used for cache files related to the working copy.
1104 ``vfs.vfs`` used for cache files related to the working copy.
1105
1105
1106 features
1106 features
1107 ``set`` of bytestrings defining features/capabilities of this
1107 ``set`` of bytestrings defining features/capabilities of this
1108 instance.
1108 instance.
1109
1109
1110 intents
1110 intents
1111 ``set`` of system strings indicating what this repo will be used
1111 ``set`` of system strings indicating what this repo will be used
1112 for.
1112 for.
1113 """
1113 """
1114 self.baseui = baseui
1114 self.baseui = baseui
1115 self.ui = ui
1115 self.ui = ui
1116 self.origroot = origroot
1116 self.origroot = origroot
1117 # vfs rooted at working directory.
1117 # vfs rooted at working directory.
1118 self.wvfs = wdirvfs
1118 self.wvfs = wdirvfs
1119 self.root = wdirvfs.base
1119 self.root = wdirvfs.base
1120 # vfs rooted at .hg/. Used to access most non-store paths.
1120 # vfs rooted at .hg/. Used to access most non-store paths.
1121 self.vfs = hgvfs
1121 self.vfs = hgvfs
1122 self.path = hgvfs.base
1122 self.path = hgvfs.base
1123 self.requirements = requirements
1123 self.requirements = requirements
1124 self.supported = supportedrequirements
1124 self.supported = supportedrequirements
1125 self.sharedpath = sharedpath
1125 self.sharedpath = sharedpath
1126 self.store = store
1126 self.store = store
1127 self.cachevfs = cachevfs
1127 self.cachevfs = cachevfs
1128 self.wcachevfs = wcachevfs
1128 self.wcachevfs = wcachevfs
1129 self.features = features
1129 self.features = features
1130
1130
1131 self.filtername = None
1131 self.filtername = None
1132
1132
1133 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1133 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1134 b'devel', b'check-locks'
1134 b'devel', b'check-locks'
1135 ):
1135 ):
1136 self.vfs.audit = self._getvfsward(self.vfs.audit)
1136 self.vfs.audit = self._getvfsward(self.vfs.audit)
1137 # A list of callbacks to shape the phase if no data were found.
1137 # A list of callbacks to shape the phase if no data were found.
1138 # Callbacks are in the form: func(repo, roots) --> processed root.
1138 # Callbacks are in the form: func(repo, roots) --> processed root.
1139 # This list is to be filled by extensions during repo setup
1139 # This list is to be filled by extensions during repo setup
1140 self._phasedefaults = []
1140 self._phasedefaults = []
1141
1141
1142 color.setup(self.ui)
1142 color.setup(self.ui)
1143
1143
1144 self.spath = self.store.path
1144 self.spath = self.store.path
1145 self.svfs = self.store.vfs
1145 self.svfs = self.store.vfs
1146 self.sjoin = self.store.join
1146 self.sjoin = self.store.join
1147 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1147 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1148 b'devel', b'check-locks'
1148 b'devel', b'check-locks'
1149 ):
1149 ):
1150 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1150 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1151 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1151 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1152 else: # standard vfs
1152 else: # standard vfs
1153 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1153 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1154
1154
1155 self._dirstatevalidatewarned = False
1155 self._dirstatevalidatewarned = False
1156
1156
1157 self._branchcaches = branchmap.BranchMapCache()
1157 self._branchcaches = branchmap.BranchMapCache()
1158 self._revbranchcache = None
1158 self._revbranchcache = None
1159 self._filterpats = {}
1159 self._filterpats = {}
1160 self._datafilters = {}
1160 self._datafilters = {}
1161 self._transref = self._lockref = self._wlockref = None
1161 self._transref = self._lockref = self._wlockref = None
1162
1162
1163 # A cache for various files under .hg/ that tracks file changes,
1163 # A cache for various files under .hg/ that tracks file changes,
1164 # (used by the filecache decorator)
1164 # (used by the filecache decorator)
1165 #
1165 #
1166 # Maps a property name to its util.filecacheentry
1166 # Maps a property name to its util.filecacheentry
1167 self._filecache = {}
1167 self._filecache = {}
1168
1168
1169 # hold sets of revisions to be filtered
1169 # hold sets of revisions to be filtered
1170 # should be cleared when something might have changed the filter value:
1170 # should be cleared when something might have changed the filter value:
1171 # - new changesets,
1171 # - new changesets,
1172 # - phase change,
1172 # - phase change,
1173 # - new obsolescence marker,
1173 # - new obsolescence marker,
1174 # - working directory parent change,
1174 # - working directory parent change,
1175 # - bookmark changes
1175 # - bookmark changes
1176 self.filteredrevcache = {}
1176 self.filteredrevcache = {}
1177
1177
1178 # post-dirstate-status hooks
1178 # post-dirstate-status hooks
1179 self._postdsstatus = []
1179 self._postdsstatus = []
1180
1180
1181 # generic mapping between names and nodes
1181 # generic mapping between names and nodes
1182 self.names = namespaces.namespaces()
1182 self.names = namespaces.namespaces()
1183
1183
1184 # Key to signature value.
1184 # Key to signature value.
1185 self._sparsesignaturecache = {}
1185 self._sparsesignaturecache = {}
1186 # Signature to cached matcher instance.
1186 # Signature to cached matcher instance.
1187 self._sparsematchercache = {}
1187 self._sparsematchercache = {}
1188
1188
1189 self._extrafilterid = repoview.extrafilter(ui)
1189 self._extrafilterid = repoview.extrafilter(ui)
1190
1190
1191 self.filecopiesmode = None
1191 self.filecopiesmode = None
1192 if COPIESSDC_REQUIREMENT in self.requirements:
1192 if COPIESSDC_REQUIREMENT in self.requirements:
1193 self.filecopiesmode = b'changeset-sidedata'
1193 self.filecopiesmode = b'changeset-sidedata'
1194
1194
1195 def _getvfsward(self, origfunc):
1195 def _getvfsward(self, origfunc):
1196 """build a ward for self.vfs"""
1196 """build a ward for self.vfs"""
1197 rref = weakref.ref(self)
1197 rref = weakref.ref(self)
1198
1198
1199 def checkvfs(path, mode=None):
1199 def checkvfs(path, mode=None):
1200 ret = origfunc(path, mode=mode)
1200 ret = origfunc(path, mode=mode)
1201 repo = rref()
1201 repo = rref()
1202 if (
1202 if (
1203 repo is None
1203 repo is None
1204 or not util.safehasattr(repo, b'_wlockref')
1204 or not util.safehasattr(repo, b'_wlockref')
1205 or not util.safehasattr(repo, b'_lockref')
1205 or not util.safehasattr(repo, b'_lockref')
1206 ):
1206 ):
1207 return
1207 return
1208 if mode in (None, b'r', b'rb'):
1208 if mode in (None, b'r', b'rb'):
1209 return
1209 return
1210 if path.startswith(repo.path):
1210 if path.startswith(repo.path):
1211 # truncate name relative to the repository (.hg)
1211 # truncate name relative to the repository (.hg)
1212 path = path[len(repo.path) + 1 :]
1212 path = path[len(repo.path) + 1 :]
1213 if path.startswith(b'cache/'):
1213 if path.startswith(b'cache/'):
1214 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1214 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1215 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1215 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1216 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1216 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1217 # journal is covered by 'lock'
1217 # journal is covered by 'lock'
1218 if repo._currentlock(repo._lockref) is None:
1218 if repo._currentlock(repo._lockref) is None:
1219 repo.ui.develwarn(
1219 repo.ui.develwarn(
1220 b'write with no lock: "%s"' % path,
1220 b'write with no lock: "%s"' % path,
1221 stacklevel=3,
1221 stacklevel=3,
1222 config=b'check-locks',
1222 config=b'check-locks',
1223 )
1223 )
1224 elif repo._currentlock(repo._wlockref) is None:
1224 elif repo._currentlock(repo._wlockref) is None:
1225 # rest of vfs files are covered by 'wlock'
1225 # rest of vfs files are covered by 'wlock'
1226 #
1226 #
1227 # exclude special files
1227 # exclude special files
1228 for prefix in self._wlockfreeprefix:
1228 for prefix in self._wlockfreeprefix:
1229 if path.startswith(prefix):
1229 if path.startswith(prefix):
1230 return
1230 return
1231 repo.ui.develwarn(
1231 repo.ui.develwarn(
1232 b'write with no wlock: "%s"' % path,
1232 b'write with no wlock: "%s"' % path,
1233 stacklevel=3,
1233 stacklevel=3,
1234 config=b'check-locks',
1234 config=b'check-locks',
1235 )
1235 )
1236 return ret
1236 return ret
1237
1237
1238 return checkvfs
1238 return checkvfs
1239
1239
1240 def _getsvfsward(self, origfunc):
1240 def _getsvfsward(self, origfunc):
1241 """build a ward for self.svfs"""
1241 """build a ward for self.svfs"""
1242 rref = weakref.ref(self)
1242 rref = weakref.ref(self)
1243
1243
1244 def checksvfs(path, mode=None):
1244 def checksvfs(path, mode=None):
1245 ret = origfunc(path, mode=mode)
1245 ret = origfunc(path, mode=mode)
1246 repo = rref()
1246 repo = rref()
1247 if repo is None or not util.safehasattr(repo, b'_lockref'):
1247 if repo is None or not util.safehasattr(repo, b'_lockref'):
1248 return
1248 return
1249 if mode in (None, b'r', b'rb'):
1249 if mode in (None, b'r', b'rb'):
1250 return
1250 return
1251 if path.startswith(repo.sharedpath):
1251 if path.startswith(repo.sharedpath):
1252 # truncate name relative to the repository (.hg)
1252 # truncate name relative to the repository (.hg)
1253 path = path[len(repo.sharedpath) + 1 :]
1253 path = path[len(repo.sharedpath) + 1 :]
1254 if repo._currentlock(repo._lockref) is None:
1254 if repo._currentlock(repo._lockref) is None:
1255 repo.ui.develwarn(
1255 repo.ui.develwarn(
1256 b'write with no lock: "%s"' % path, stacklevel=4
1256 b'write with no lock: "%s"' % path, stacklevel=4
1257 )
1257 )
1258 return ret
1258 return ret
1259
1259
1260 return checksvfs
1260 return checksvfs
1261
1261
1262 def close(self):
1262 def close(self):
1263 self._writecaches()
1263 self._writecaches()
1264
1264
1265 def _writecaches(self):
1265 def _writecaches(self):
1266 if self._revbranchcache:
1266 if self._revbranchcache:
1267 self._revbranchcache.write()
1267 self._revbranchcache.write()
1268
1268
1269 def _restrictcapabilities(self, caps):
1269 def _restrictcapabilities(self, caps):
1270 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1270 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1271 caps = set(caps)
1271 caps = set(caps)
1272 capsblob = bundle2.encodecaps(
1272 capsblob = bundle2.encodecaps(
1273 bundle2.getrepocaps(self, role=b'client')
1273 bundle2.getrepocaps(self, role=b'client')
1274 )
1274 )
1275 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1275 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1276 return caps
1276 return caps
1277
1277
1278 def _writerequirements(self):
1278 def _writerequirements(self):
1279 scmutil.writerequires(self.vfs, self.requirements)
1279 scmutil.writerequires(self.vfs, self.requirements)
1280
1280
1281 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1281 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1282 # self -> auditor -> self._checknested -> self
1282 # self -> auditor -> self._checknested -> self
1283
1283
1284 @property
1284 @property
1285 def auditor(self):
1285 def auditor(self):
1286 # This is only used by context.workingctx.match in order to
1286 # This is only used by context.workingctx.match in order to
1287 # detect files in subrepos.
1287 # detect files in subrepos.
1288 return pathutil.pathauditor(self.root, callback=self._checknested)
1288 return pathutil.pathauditor(self.root, callback=self._checknested)
1289
1289
1290 @property
1290 @property
1291 def nofsauditor(self):
1291 def nofsauditor(self):
1292 # This is only used by context.basectx.match in order to detect
1292 # This is only used by context.basectx.match in order to detect
1293 # files in subrepos.
1293 # files in subrepos.
1294 return pathutil.pathauditor(
1294 return pathutil.pathauditor(
1295 self.root, callback=self._checknested, realfs=False, cached=True
1295 self.root, callback=self._checknested, realfs=False, cached=True
1296 )
1296 )
1297
1297
1298 def _checknested(self, path):
1298 def _checknested(self, path):
1299 """Determine if path is a legal nested repository."""
1299 """Determine if path is a legal nested repository."""
1300 if not path.startswith(self.root):
1300 if not path.startswith(self.root):
1301 return False
1301 return False
1302 subpath = path[len(self.root) + 1 :]
1302 subpath = path[len(self.root) + 1 :]
1303 normsubpath = util.pconvert(subpath)
1303 normsubpath = util.pconvert(subpath)
1304
1304
1305 # XXX: Checking against the current working copy is wrong in
1305 # XXX: Checking against the current working copy is wrong in
1306 # the sense that it can reject things like
1306 # the sense that it can reject things like
1307 #
1307 #
1308 # $ hg cat -r 10 sub/x.txt
1308 # $ hg cat -r 10 sub/x.txt
1309 #
1309 #
1310 # if sub/ is no longer a subrepository in the working copy
1310 # if sub/ is no longer a subrepository in the working copy
1311 # parent revision.
1311 # parent revision.
1312 #
1312 #
1313 # However, it can of course also allow things that would have
1313 # However, it can of course also allow things that would have
1314 # been rejected before, such as the above cat command if sub/
1314 # been rejected before, such as the above cat command if sub/
1315 # is a subrepository now, but was a normal directory before.
1315 # is a subrepository now, but was a normal directory before.
1316 # The old path auditor would have rejected by mistake since it
1316 # The old path auditor would have rejected by mistake since it
1317 # panics when it sees sub/.hg/.
1317 # panics when it sees sub/.hg/.
1318 #
1318 #
1319 # All in all, checking against the working copy seems sensible
1319 # All in all, checking against the working copy seems sensible
1320 # since we want to prevent access to nested repositories on
1320 # since we want to prevent access to nested repositories on
1321 # the filesystem *now*.
1321 # the filesystem *now*.
1322 ctx = self[None]
1322 ctx = self[None]
1323 parts = util.splitpath(subpath)
1323 parts = util.splitpath(subpath)
1324 while parts:
1324 while parts:
1325 prefix = b'/'.join(parts)
1325 prefix = b'/'.join(parts)
1326 if prefix in ctx.substate:
1326 if prefix in ctx.substate:
1327 if prefix == normsubpath:
1327 if prefix == normsubpath:
1328 return True
1328 return True
1329 else:
1329 else:
1330 sub = ctx.sub(prefix)
1330 sub = ctx.sub(prefix)
1331 return sub.checknested(subpath[len(prefix) + 1 :])
1331 return sub.checknested(subpath[len(prefix) + 1 :])
1332 else:
1332 else:
1333 parts.pop()
1333 parts.pop()
1334 return False
1334 return False
1335
1335
1336 def peer(self):
1336 def peer(self):
1337 return localpeer(self) # not cached to avoid reference cycle
1337 return localpeer(self) # not cached to avoid reference cycle
1338
1338
1339 def unfiltered(self):
1339 def unfiltered(self):
1340 """Return unfiltered version of the repository
1340 """Return unfiltered version of the repository
1341
1341
1342 Intended to be overwritten by filtered repo."""
1342 Intended to be overwritten by filtered repo."""
1343 return self
1343 return self
1344
1344
1345 def filtered(self, name, visibilityexceptions=None):
1345 def filtered(self, name, visibilityexceptions=None):
1346 """Return a filtered version of a repository
1346 """Return a filtered version of a repository
1347
1347
1348 The `name` parameter is the identifier of the requested view. This
1348 The `name` parameter is the identifier of the requested view. This
1349 will return a repoview object set "exactly" to the specified view.
1349 will return a repoview object set "exactly" to the specified view.
1350
1350
1351 This function does not apply recursive filtering to a repository. For
1351 This function does not apply recursive filtering to a repository. For
1352 example calling `repo.filtered("served")` will return a repoview using
1352 example calling `repo.filtered("served")` will return a repoview using
1353 the "served" view, regardless of the initial view used by `repo`.
1353 the "served" view, regardless of the initial view used by `repo`.
1354
1354
1355 In other words, there is always only one level of `repoview` "filtering".
1355 In other words, there is always only one level of `repoview` "filtering".
1356 """
1356 """
1357 if self._extrafilterid is not None and b'%' not in name:
1357 if self._extrafilterid is not None and b'%' not in name:
1358 name = name + b'%' + self._extrafilterid
1358 name = name + b'%' + self._extrafilterid
1359
1359
1360 cls = repoview.newtype(self.unfiltered().__class__)
1360 cls = repoview.newtype(self.unfiltered().__class__)
1361 return cls(self, name, visibilityexceptions)
1361 return cls(self, name, visibilityexceptions)
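# Illustrative usage sketch (an editor's addition, not part of this
# changeset): filtering is never nested, so asking a view for another
# view is the same as asking the unfiltered repository for it.
#
#     visible = repo.filtered(b'visible')
#     served = visible.filtered(b'served')  # same view as repo.filtered(b'served')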
1362
1362
1363 @mixedrepostorecache(
1363 @mixedrepostorecache(
1364 (b'bookmarks', b'plain'),
1364 (b'bookmarks', b'plain'),
1365 (b'bookmarks.current', b'plain'),
1365 (b'bookmarks.current', b'plain'),
1366 (b'bookmarks', b''),
1366 (b'bookmarks', b''),
1367 (b'00changelog.i', b''),
1367 (b'00changelog.i', b''),
1368 )
1368 )
1369 def _bookmarks(self):
1369 def _bookmarks(self):
1370 # Since the multiple files involved in the transaction cannot be
1370 # Since the multiple files involved in the transaction cannot be
1371 # written atomically (with current repository format), there is a race
1371 # written atomically (with current repository format), there is a race
1372 # condition here.
1372 # condition here.
1373 #
1373 #
1374 # 1) changelog content A is read
1374 # 1) changelog content A is read
1375 # 2) outside transaction update changelog to content B
1375 # 2) outside transaction update changelog to content B
1376 # 3) outside transaction update bookmark file referring to content B
1376 # 3) outside transaction update bookmark file referring to content B
1377 # 4) bookmarks file content is read and filtered against changelog-A
1377 # 4) bookmarks file content is read and filtered against changelog-A
1378 #
1378 #
1379 # When this happens, bookmarks against nodes missing from A are dropped.
1379 # When this happens, bookmarks against nodes missing from A are dropped.
1380 #
1380 #
1381 # Having this happen during a read is not great, but it becomes worse
1381 # Having this happen during a read is not great, but it becomes worse
1382 # when it happens during a write, because the bookmarks to the "unknown"
1382 # when it happens during a write, because the bookmarks to the "unknown"
1383 # nodes will be dropped for good. However, writes happen within locks.
1383 # nodes will be dropped for good. However, writes happen within locks.
1384 # This locking makes it possible to have a race-free consistent read.
1384 # This locking makes it possible to have a race-free consistent read.
1385 # For this purpose, data read from disk before locking is
1385 # For this purpose, data read from disk before locking is
1386 # "invalidated" right after the locks are taken. These invalidations are
1386 # "invalidated" right after the locks are taken. These invalidations are
1387 # "light": the `filecache` mechanism keeps the data in memory and will
1387 # "light": the `filecache` mechanism keeps the data in memory and will
1388 # reuse it if the underlying files did not change. Not parsing the
1388 # reuse it if the underlying files did not change. Not parsing the
1389 # same data multiple times helps performance.
1389 # same data multiple times helps performance.
1390 #
1390 #
1391 # Unfortunately, in the case described above, the files tracked by the
1391 # Unfortunately, in the case described above, the files tracked by the
1392 # bookmarks file cache might not have changed, but the in-memory
1392 # bookmarks file cache might not have changed, but the in-memory
1393 # content is still "wrong" because we used an older changelog content
1393 # content is still "wrong" because we used an older changelog content
1394 # to process the on-disk data. So after locking, the changelog would be
1394 # to process the on-disk data. So after locking, the changelog would be
1395 # refreshed but `_bookmarks` would be preserved.
1395 # refreshed but `_bookmarks` would be preserved.
1396 # Adding `00changelog.i` to the list of tracked files is not
1396 # Adding `00changelog.i` to the list of tracked files is not
1397 # enough, because at the time we build the content for `_bookmarks` in
1397 # enough, because at the time we build the content for `_bookmarks` in
1398 # (4), the changelog file has already diverged from the content used
1398 # (4), the changelog file has already diverged from the content used
1399 # for loading `changelog` in (1)
1399 # for loading `changelog` in (1)
1400 #
1400 #
1401 # To prevent the issue, we force the changelog to be explicitly
1401 # To prevent the issue, we force the changelog to be explicitly
1402 # reloaded while computing `_bookmarks`. The data race can still happen
1402 # reloaded while computing `_bookmarks`. The data race can still happen
1403 # without the lock (with a narrower window), but it would no longer go
1403 # without the lock (with a narrower window), but it would no longer go
1404 # undetected during the lock time refresh.
1404 # undetected during the lock time refresh.
1405 #
1405 #
1406 # The new schedule is as follows:
1406 # The new schedule is as follows:
1407 #
1407 #
1408 # 1) filecache logic detects that `_bookmarks` needs to be computed
1408 # 1) filecache logic detects that `_bookmarks` needs to be computed
1409 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1409 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1410 # 3) We force `changelog` filecache to be tested
1410 # 3) We force `changelog` filecache to be tested
1411 # 4) cachestat for `changelog` are captured (for changelog)
1411 # 4) cachestat for `changelog` are captured (for changelog)
1412 # 5) `_bookmarks` is computed and cached
1412 # 5) `_bookmarks` is computed and cached
1413 #
1413 #
1414 # The step in (3) ensures we have a changelog at least as recent as the
1414 # The step in (3) ensures we have a changelog at least as recent as the
1415 # cache stat computed in (1). As a result at locking time:
1415 # cache stat computed in (1). As a result at locking time:
1416 # * if the changelog did not change since (1) -> we can reuse the data
1416 # * if the changelog did not change since (1) -> we can reuse the data
1417 # * otherwise -> the bookmarks get refreshed.
1417 # * otherwise -> the bookmarks get refreshed.
1418 self._refreshchangelog()
1418 self._refreshchangelog()
1419 return bookmarks.bmstore(self)
1419 return bookmarks.bmstore(self)
1420
1420
1421 def _refreshchangelog(self):
1421 def _refreshchangelog(self):
1422 """make sure the in memory changelog match the on-disk one"""
1422 """make sure the in memory changelog match the on-disk one"""
1423 if 'changelog' in vars(self) and self.currenttransaction() is None:
1423 if 'changelog' in vars(self) and self.currenttransaction() is None:
1424 del self.changelog
1424 del self.changelog
1425
1425
1426 @property
1426 @property
1427 def _activebookmark(self):
1427 def _activebookmark(self):
1428 return self._bookmarks.active
1428 return self._bookmarks.active
1429
1429
1430 # _phasesets depend on changelog. what we need is to call
1430 # _phasesets depend on changelog. what we need is to call
1431 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1431 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1432 # can't be easily expressed in filecache mechanism.
1432 # can't be easily expressed in filecache mechanism.
1433 @storecache(b'phaseroots', b'00changelog.i')
1433 @storecache(b'phaseroots', b'00changelog.i')
1434 def _phasecache(self):
1434 def _phasecache(self):
1435 return phases.phasecache(self, self._phasedefaults)
1435 return phases.phasecache(self, self._phasedefaults)
1436
1436
1437 @storecache(b'obsstore')
1437 @storecache(b'obsstore')
1438 def obsstore(self):
1438 def obsstore(self):
1439 return obsolete.makestore(self.ui, self)
1439 return obsolete.makestore(self.ui, self)
1440
1440
1441 @storecache(b'00changelog.i')
1441 @storecache(b'00changelog.i')
1442 def changelog(self):
1442 def changelog(self):
1443 return self.store.changelog(txnutil.mayhavepending(self.root))
1443 return self.store.changelog(txnutil.mayhavepending(self.root))
1444
1444
1445 @storecache(b'00manifest.i')
1445 @storecache(b'00manifest.i')
1446 def manifestlog(self):
1446 def manifestlog(self):
1447 return self.store.manifestlog(self, self._storenarrowmatch)
1447 return self.store.manifestlog(self, self._storenarrowmatch)
1448
1448
1449 @repofilecache(b'dirstate')
1449 @repofilecache(b'dirstate')
1450 def dirstate(self):
1450 def dirstate(self):
1451 return self._makedirstate()
1451 return self._makedirstate()
1452
1452
1453 def _makedirstate(self):
1453 def _makedirstate(self):
1454 """Extension point for wrapping the dirstate per-repo."""
1454 """Extension point for wrapping the dirstate per-repo."""
1455 sparsematchfn = lambda: sparse.matcher(self)
1455 sparsematchfn = lambda: sparse.matcher(self)
1456
1456
1457 return dirstate.dirstate(
1457 return dirstate.dirstate(
1458 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1458 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1459 )
1459 )
1460
1460
1461 def _dirstatevalidate(self, node):
1461 def _dirstatevalidate(self, node):
1462 try:
1462 try:
1463 self.changelog.rev(node)
1463 self.changelog.rev(node)
1464 return node
1464 return node
1465 except error.LookupError:
1465 except error.LookupError:
1466 if not self._dirstatevalidatewarned:
1466 if not self._dirstatevalidatewarned:
1467 self._dirstatevalidatewarned = True
1467 self._dirstatevalidatewarned = True
1468 self.ui.warn(
1468 self.ui.warn(
1469 _(b"warning: ignoring unknown working parent %s!\n")
1469 _(b"warning: ignoring unknown working parent %s!\n")
1470 % short(node)
1470 % short(node)
1471 )
1471 )
1472 return nullid
1472 return nullid
1473
1473
1474 @storecache(narrowspec.FILENAME)
1474 @storecache(narrowspec.FILENAME)
1475 def narrowpats(self):
1475 def narrowpats(self):
1476 """matcher patterns for this repository's narrowspec
1476 """matcher patterns for this repository's narrowspec
1477
1477
1478 A tuple of (includes, excludes).
1478 A tuple of (includes, excludes).
1479 """
1479 """
1480 return narrowspec.load(self)
1480 return narrowspec.load(self)
1481
1481
1482 @storecache(narrowspec.FILENAME)
1482 @storecache(narrowspec.FILENAME)
1483 def _storenarrowmatch(self):
1483 def _storenarrowmatch(self):
1484 if repository.NARROW_REQUIREMENT not in self.requirements:
1484 if repository.NARROW_REQUIREMENT not in self.requirements:
1485 return matchmod.always()
1485 return matchmod.always()
1486 include, exclude = self.narrowpats
1486 include, exclude = self.narrowpats
1487 return narrowspec.match(self.root, include=include, exclude=exclude)
1487 return narrowspec.match(self.root, include=include, exclude=exclude)
1488
1488
1489 @storecache(narrowspec.FILENAME)
1489 @storecache(narrowspec.FILENAME)
1490 def _narrowmatch(self):
1490 def _narrowmatch(self):
1491 if repository.NARROW_REQUIREMENT not in self.requirements:
1491 if repository.NARROW_REQUIREMENT not in self.requirements:
1492 return matchmod.always()
1492 return matchmod.always()
1493 narrowspec.checkworkingcopynarrowspec(self)
1493 narrowspec.checkworkingcopynarrowspec(self)
1494 include, exclude = self.narrowpats
1494 include, exclude = self.narrowpats
1495 return narrowspec.match(self.root, include=include, exclude=exclude)
1495 return narrowspec.match(self.root, include=include, exclude=exclude)
1496
1496
1497 def narrowmatch(self, match=None, includeexact=False):
1497 def narrowmatch(self, match=None, includeexact=False):
1498 """matcher corresponding the the repo's narrowspec
1498 """matcher corresponding the the repo's narrowspec
1499
1499
1500 If `match` is given, then that will be intersected with the narrow
1500 If `match` is given, then that will be intersected with the narrow
1501 matcher.
1501 matcher.
1502
1502
1503 If `includeexact` is True, then any exact matches from `match` will
1503 If `includeexact` is True, then any exact matches from `match` will
1504 be included even if they're outside the narrowspec.
1504 be included even if they're outside the narrowspec.
1505 """
1505 """
1506 if match:
1506 if match:
1507 if includeexact and not self._narrowmatch.always():
1507 if includeexact and not self._narrowmatch.always():
1508 # do not exclude explicitly-specified paths so that they can
1508 # do not exclude explicitly-specified paths so that they can
1509 # be warned later on
1509 # be warned later on
1510 em = matchmod.exact(match.files())
1510 em = matchmod.exact(match.files())
1511 nm = matchmod.unionmatcher([self._narrowmatch, em])
1511 nm = matchmod.unionmatcher([self._narrowmatch, em])
1512 return matchmod.intersectmatchers(match, nm)
1512 return matchmod.intersectmatchers(match, nm)
1513 return matchmod.intersectmatchers(match, self._narrowmatch)
1513 return matchmod.intersectmatchers(match, self._narrowmatch)
1514 return self._narrowmatch
1514 return self._narrowmatch
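# Usage sketch (hypothetical example, not part of this changeset): intersect
# a caller-supplied matcher with the narrowspec, keeping exact paths so they
# can be warned about later even when they fall outside the narrowspec.
#
#     m = matchmod.match(repo.root, b'', [b'path:src'])
#     nm = repo.narrowmatch(m, includeexact=True)
#     nm(b'src/foo.py')  # True only if covered by the narrowspec (or exact)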
1515
1515
1516 def setnarrowpats(self, newincludes, newexcludes):
1516 def setnarrowpats(self, newincludes, newexcludes):
1517 narrowspec.save(self, newincludes, newexcludes)
1517 narrowspec.save(self, newincludes, newexcludes)
1518 self.invalidate(clearfilecache=True)
1518 self.invalidate(clearfilecache=True)
1519
1519
1520 @util.propertycache
1520 @util.propertycache
1521 def _quick_access_changeid(self):
1521 def _quick_access_changeid(self):
1522 """an helper dictionnary for __getitem__ calls
1522 """an helper dictionnary for __getitem__ calls
1523
1523
1524 This contains a list of symbols we can recognise right away without
1524 This contains a list of symbols we can recognise right away without
1525 further processing.
1525 further processing.
1526 """
1526 """
1527 return {
1527 return {
1528 b'null': (nullrev, nullid),
1528 b'null': (nullrev, nullid),
1529 nullrev: (nullrev, nullid),
1529 nullrev: (nullrev, nullid),
1530 nullid: (nullrev, nullid),
1530 nullid: (nullrev, nullid),
1531 }
1531 }
1532
1532
1533 def __getitem__(self, changeid):
1533 def __getitem__(self, changeid):
1534 # dealing with special cases
1534 # dealing with special cases
1535 if changeid is None:
1535 if changeid is None:
1536 return context.workingctx(self)
1536 return context.workingctx(self)
1537 if isinstance(changeid, context.basectx):
1537 if isinstance(changeid, context.basectx):
1538 return changeid
1538 return changeid
1539
1539
1540 # dealing with multiple revisions
1540 # dealing with multiple revisions
1541 if isinstance(changeid, slice):
1541 if isinstance(changeid, slice):
1542 # wdirrev isn't contiguous so the slice shouldn't include it
1542 # wdirrev isn't contiguous so the slice shouldn't include it
1543 return [
1543 return [
1544 self[i]
1544 self[i]
1545 for i in pycompat.xrange(*changeid.indices(len(self)))
1545 for i in pycompat.xrange(*changeid.indices(len(self)))
1546 if i not in self.changelog.filteredrevs
1546 if i not in self.changelog.filteredrevs
1547 ]
1547 ]
1548
1548
1549 # dealing with some special values
1549 # dealing with some special values
1550 quick_access = self._quick_access_changeid.get(changeid)
1550 quick_access = self._quick_access_changeid.get(changeid)
1551 if quick_access is not None:
1551 if quick_access is not None:
1552 rev, node = quick_access
1552 rev, node = quick_access
1553 return context.changectx(self, rev, node, maybe_filtered=False)
1553 return context.changectx(self, rev, node, maybe_filtered=False)
1554 if changeid == b'tip':
1554 if changeid == b'tip':
1555 node = self.changelog.tip()
1555 node = self.changelog.tip()
1556 rev = self.changelog.rev(node)
1556 rev = self.changelog.rev(node)
1557 return context.changectx(self, rev, node)
1557 return context.changectx(self, rev, node)
1558
1558
1559 # dealing with arbitrary values
1559 # dealing with arbitrary values
1560 try:
1560 try:
1561 if isinstance(changeid, int):
1561 if isinstance(changeid, int):
1562 node = self.changelog.node(changeid)
1562 node = self.changelog.node(changeid)
1563 rev = changeid
1563 rev = changeid
1564 elif changeid == b'.':
1564 elif changeid == b'.':
1565 # this is a hack to delay/avoid loading obsmarkers
1565 # this is a hack to delay/avoid loading obsmarkers
1566 # when we know that '.' won't be hidden
1566 # when we know that '.' won't be hidden
1567 node = self.dirstate.p1()
1567 node = self.dirstate.p1()
1568 rev = self.unfiltered().changelog.rev(node)
1568 rev = self.unfiltered().changelog.rev(node)
1569 elif len(changeid) == 20:
1569 elif len(changeid) == 20:
1570 try:
1570 try:
1571 node = changeid
1571 node = changeid
1572 rev = self.changelog.rev(changeid)
1572 rev = self.changelog.rev(changeid)
1573 except error.FilteredLookupError:
1573 except error.FilteredLookupError:
1574 changeid = hex(changeid) # for the error message
1574 changeid = hex(changeid) # for the error message
1575 raise
1575 raise
1576 except LookupError:
1576 except LookupError:
1577 # check if it might have come from damaged dirstate
1577 # check if it might have come from damaged dirstate
1578 #
1578 #
1579 # XXX we could avoid the unfiltered if we had a recognizable
1579 # XXX we could avoid the unfiltered if we had a recognizable
1580 # exception for filtered changeset access
1580 # exception for filtered changeset access
1581 if (
1581 if (
1582 self.local()
1582 self.local()
1583 and changeid in self.unfiltered().dirstate.parents()
1583 and changeid in self.unfiltered().dirstate.parents()
1584 ):
1584 ):
1585 msg = _(b"working directory has unknown parent '%s'!")
1585 msg = _(b"working directory has unknown parent '%s'!")
1586 raise error.Abort(msg % short(changeid))
1586 raise error.Abort(msg % short(changeid))
1587 changeid = hex(changeid) # for the error message
1587 changeid = hex(changeid) # for the error message
1588 raise
1588 raise
1589
1589
1590 elif len(changeid) == 40:
1590 elif len(changeid) == 40:
1591 node = bin(changeid)
1591 node = bin(changeid)
1592 rev = self.changelog.rev(node)
1592 rev = self.changelog.rev(node)
1593 else:
1593 else:
1594 raise error.ProgrammingError(
1594 raise error.ProgrammingError(
1595 b"unsupported changeid '%s' of type %s"
1595 b"unsupported changeid '%s' of type %s"
1596 % (changeid, pycompat.bytestr(type(changeid)))
1596 % (changeid, pycompat.bytestr(type(changeid)))
1597 )
1597 )
1598
1598
1599 return context.changectx(self, rev, node)
1599 return context.changectx(self, rev, node)
1600
1600
1601 except (error.FilteredIndexError, error.FilteredLookupError):
1601 except (error.FilteredIndexError, error.FilteredLookupError):
1602 raise error.FilteredRepoLookupError(
1602 raise error.FilteredRepoLookupError(
1603 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1603 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1604 )
1604 )
1605 except (IndexError, LookupError):
1605 except (IndexError, LookupError):
1606 raise error.RepoLookupError(
1606 raise error.RepoLookupError(
1607 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1607 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1608 )
1608 )
1609 except error.WdirUnsupported:
1609 except error.WdirUnsupported:
1610 return context.workingctx(self)
1610 return context.workingctx(self)
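# Usage sketch (illustrative only, an editor's addition): the lookup forms
# accepted by __getitem__ above.
#
#     repo[None]     # working directory context
#     repo[0]        # changectx for revision 0
#     repo[b'.']     # working directory parent
#     repo[b'tip']   # tip changeset
#     repo[0:3]      # list of changectx, filtered revisions skipped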
1611
1611
1612 def __contains__(self, changeid):
1612 def __contains__(self, changeid):
1613 """True if the given changeid exists
1613 """True if the given changeid exists
1614
1614
1615 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1615 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1616 specified.
1616 specified.
1617 """
1617 """
1618 try:
1618 try:
1619 self[changeid]
1619 self[changeid]
1620 return True
1620 return True
1621 except error.RepoLookupError:
1621 except error.RepoLookupError:
1622 return False
1622 return False
1623
1623
1624 def __nonzero__(self):
1624 def __nonzero__(self):
1625 return True
1625 return True
1626
1626
1627 __bool__ = __nonzero__
1627 __bool__ = __nonzero__
1628
1628
1629 def __len__(self):
1629 def __len__(self):
1630 # no need to pay the cost of repoview.changelog
1630 # no need to pay the cost of repoview.changelog
1631 unfi = self.unfiltered()
1631 unfi = self.unfiltered()
1632 return len(unfi.changelog)
1632 return len(unfi.changelog)
1633
1633
1634 def __iter__(self):
1634 def __iter__(self):
1635 return iter(self.changelog)
1635 return iter(self.changelog)
1636
1636
1637 def revs(self, expr, *args):
1637 def revs(self, expr, *args):
1638 '''Find revisions matching a revset.
1638 '''Find revisions matching a revset.
1639
1639
1640 The revset is specified as a string ``expr`` that may contain
1640 The revset is specified as a string ``expr`` that may contain
1641 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1641 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1642
1642
1643 Revset aliases from the configuration are not expanded. To expand
1643 Revset aliases from the configuration are not expanded. To expand
1644 user aliases, consider calling ``scmutil.revrange()`` or
1644 user aliases, consider calling ``scmutil.revrange()`` or
1645 ``repo.anyrevs([expr], user=True)``.
1645 ``repo.anyrevs([expr], user=True)``.
1646
1646
1647 Returns a revset.abstractsmartset, which is a list-like interface
1647 Returns a revset.abstractsmartset, which is a list-like interface
1648 that contains integer revisions.
1648 that contains integer revisions.
1649 '''
1649 '''
1650 tree = revsetlang.spectree(expr, *args)
1650 tree = revsetlang.spectree(expr, *args)
1651 return revset.makematcher(tree)(self)
1651 return revset.makematcher(tree)(self)
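# Usage sketch (hypothetical example): %-formatting escapes the argument via
# revsetlang.formatspec, so symbols need no manual quoting.
#
#     for rev in repo.revs(b'ancestors(%s)', b'.'):
#         ctx = repo[rev]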
1652
1652
1653 def set(self, expr, *args):
1653 def set(self, expr, *args):
1654 '''Find revisions matching a revset and emit changectx instances.
1654 '''Find revisions matching a revset and emit changectx instances.
1655
1655
1656 This is a convenience wrapper around ``revs()`` that iterates the
1656 This is a convenience wrapper around ``revs()`` that iterates the
1657 result and is a generator of changectx instances.
1657 result and is a generator of changectx instances.
1658
1658
1659 Revset aliases from the configuration are not expanded. To expand
1659 Revset aliases from the configuration are not expanded. To expand
1660 user aliases, consider calling ``scmutil.revrange()``.
1660 user aliases, consider calling ``scmutil.revrange()``.
1661 '''
1661 '''
1662 for r in self.revs(expr, *args):
1662 for r in self.revs(expr, *args):
1663 yield self[r]
1663 yield self[r]
1664
1664
1665 def anyrevs(self, specs, user=False, localalias=None):
1665 def anyrevs(self, specs, user=False, localalias=None):
1666 '''Find revisions matching one of the given revsets.
1666 '''Find revisions matching one of the given revsets.
1667
1667
1668 Revset aliases from the configuration are not expanded by default. To
1668 Revset aliases from the configuration are not expanded by default. To
1669 expand user aliases, specify ``user=True``. To provide some local
1669 expand user aliases, specify ``user=True``. To provide some local
1670 definitions overriding user aliases, set ``localalias`` to
1670 definitions overriding user aliases, set ``localalias`` to
1671 ``{name: definitionstring}``.
1671 ``{name: definitionstring}``.
1672 '''
1672 '''
1673 if specs == [b'null']:
1673 if specs == [b'null']:
1674 return revset.baseset([nullrev])
1674 return revset.baseset([nullrev])
1675 if user:
1675 if user:
1676 m = revset.matchany(
1676 m = revset.matchany(
1677 self.ui,
1677 self.ui,
1678 specs,
1678 specs,
1679 lookup=revset.lookupfn(self),
1679 lookup=revset.lookupfn(self),
1680 localalias=localalias,
1680 localalias=localalias,
1681 )
1681 )
1682 else:
1682 else:
1683 m = revset.matchany(None, specs, localalias=localalias)
1683 m = revset.matchany(None, specs, localalias=localalias)
1684 return m(self)
1684 return m(self)
1685
1685
1686 def url(self):
1686 def url(self):
1687 return b'file:' + self.root
1687 return b'file:' + self.root
1688
1688
1689 def hook(self, name, throw=False, **args):
1689 def hook(self, name, throw=False, **args):
1690 """Call a hook, passing this repo instance.
1690 """Call a hook, passing this repo instance.
1691
1691
1692 This is a convenience method to aid invoking hooks. Extensions likely
1692 This is a convenience method to aid invoking hooks. Extensions likely
1693 won't call this unless they have registered a custom hook or are
1693 won't call this unless they have registered a custom hook or are
1694 replacing code that is expected to call a hook.
1694 replacing code that is expected to call a hook.
1695 """
1695 """
1696 return hook.hook(self.ui, self, name, throw, **args)
1696 return hook.hook(self.ui, self, name, throw, **args)
1697
1697
1698 @filteredpropertycache
1698 @filteredpropertycache
1699 def _tagscache(self):
1699 def _tagscache(self):
1700 '''Returns a tagscache object that contains various tag-related
1700 '''Returns a tagscache object that contains various tag-related
1701 caches.'''
1701 caches.'''
1702
1702
1703 # This simplifies its cache management by having one decorated
1703 # This simplifies its cache management by having one decorated
1704 # function (this one) and the rest simply fetch things from it.
1704 # function (this one) and the rest simply fetch things from it.
1705 class tagscache(object):
1705 class tagscache(object):
1706 def __init__(self):
1706 def __init__(self):
1707 # These two define the set of tags for this repository. tags
1707 # These two define the set of tags for this repository. tags
1708 # maps tag name to node; tagtypes maps tag name to 'global' or
1708 # maps tag name to node; tagtypes maps tag name to 'global' or
1709 # 'local'. (Global tags are defined by .hgtags across all
1709 # 'local'. (Global tags are defined by .hgtags across all
1710 # heads, and local tags are defined in .hg/localtags.)
1710 # heads, and local tags are defined in .hg/localtags.)
1711 # They constitute the in-memory cache of tags.
1711 # They constitute the in-memory cache of tags.
1712 self.tags = self.tagtypes = None
1712 self.tags = self.tagtypes = None
1713
1713
1714 self.nodetagscache = self.tagslist = None
1714 self.nodetagscache = self.tagslist = None
1715
1715
1716 cache = tagscache()
1716 cache = tagscache()
1717 cache.tags, cache.tagtypes = self._findtags()
1717 cache.tags, cache.tagtypes = self._findtags()
1718
1718
1719 return cache
1719 return cache
1720
1720
1721 def tags(self):
1721 def tags(self):
1722 '''return a mapping of tag to node'''
1722 '''return a mapping of tag to node'''
1723 t = {}
1723 t = {}
1724 if self.changelog.filteredrevs:
1724 if self.changelog.filteredrevs:
1725 tags, tt = self._findtags()
1725 tags, tt = self._findtags()
1726 else:
1726 else:
1727 tags = self._tagscache.tags
1727 tags = self._tagscache.tags
1728 rev = self.changelog.rev
1728 rev = self.changelog.rev
1729 for k, v in pycompat.iteritems(tags):
1729 for k, v in pycompat.iteritems(tags):
1730 try:
1730 try:
1731 # ignore tags to unknown nodes
1731 # ignore tags to unknown nodes
1732 rev(v)
1732 rev(v)
1733 t[k] = v
1733 t[k] = v
1734 except (error.LookupError, ValueError):
1734 except (error.LookupError, ValueError):
1735 pass
1735 pass
1736 return t
1736 return t
1737
1737
1738 def _findtags(self):
1738 def _findtags(self):
1739 '''Do the hard work of finding tags. Return a pair of dicts
1739 '''Do the hard work of finding tags. Return a pair of dicts
1740 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1740 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1741 maps tag name to a string like \'global\' or \'local\'.
1741 maps tag name to a string like \'global\' or \'local\'.
1742 Subclasses or extensions are free to add their own tags, but
1742 Subclasses or extensions are free to add their own tags, but
1743 should be aware that the returned dicts will be retained for the
1743 should be aware that the returned dicts will be retained for the
1744 duration of the localrepo object.'''
1744 duration of the localrepo object.'''
1745
1745
1746 # XXX what tagtype should subclasses/extensions use? Currently
1746 # XXX what tagtype should subclasses/extensions use? Currently
1747 # mq and bookmarks add tags, but do not set the tagtype at all.
1747 # mq and bookmarks add tags, but do not set the tagtype at all.
1748 # Should each extension invent its own tag type? Should there
1748 # Should each extension invent its own tag type? Should there
1749 # be one tagtype for all such "virtual" tags? Or is the status
1749 # be one tagtype for all such "virtual" tags? Or is the status
1750 # quo fine?
1750 # quo fine?
1751
1751
1752 # map tag name to (node, hist)
1752 # map tag name to (node, hist)
1753 alltags = tagsmod.findglobaltags(self.ui, self)
1753 alltags = tagsmod.findglobaltags(self.ui, self)
1754 # map tag name to tag type
1754 # map tag name to tag type
1755 tagtypes = dict((tag, b'global') for tag in alltags)
1755 tagtypes = dict((tag, b'global') for tag in alltags)
1756
1756
1757 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1757 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1758
1758
1759 # Build the return dicts. Have to re-encode tag names because
1759 # Build the return dicts. Have to re-encode tag names because
1760 # the tags module always uses UTF-8 (in order not to lose info
1760 # the tags module always uses UTF-8 (in order not to lose info
1761 # writing to the cache), but the rest of Mercurial wants them in
1761 # writing to the cache), but the rest of Mercurial wants them in
1762 # local encoding.
1762 # local encoding.
1763 tags = {}
1763 tags = {}
1764 for (name, (node, hist)) in pycompat.iteritems(alltags):
1764 for (name, (node, hist)) in pycompat.iteritems(alltags):
1765 if node != nullid:
1765 if node != nullid:
1766 tags[encoding.tolocal(name)] = node
1766 tags[encoding.tolocal(name)] = node
1767 tags[b'tip'] = self.changelog.tip()
1767 tags[b'tip'] = self.changelog.tip()
1768 tagtypes = dict(
1768 tagtypes = dict(
1769 [
1769 [
1770 (encoding.tolocal(name), value)
1770 (encoding.tolocal(name), value)
1771 for (name, value) in pycompat.iteritems(tagtypes)
1771 for (name, value) in pycompat.iteritems(tagtypes)
1772 ]
1772 ]
1773 )
1773 )
1774 return (tags, tagtypes)
1774 return (tags, tagtypes)
1775
1775
1776 def tagtype(self, tagname):
1776 def tagtype(self, tagname):
1777 '''
1777 '''
1778 return the type of the given tag. result can be:
1778 return the type of the given tag. result can be:
1779
1779
1780 'local' : a local tag
1780 'local' : a local tag
1781 'global' : a global tag
1781 'global' : a global tag
1782 None : tag does not exist
1782 None : tag does not exist
1783 '''
1783 '''
1784
1784
1785 return self._tagscache.tagtypes.get(tagname)
1785 return self._tagscache.tagtypes.get(tagname)
1786
1786
1787 def tagslist(self):
1787 def tagslist(self):
1788 '''return a list of tags ordered by revision'''
1788 '''return a list of tags ordered by revision'''
1789 if not self._tagscache.tagslist:
1789 if not self._tagscache.tagslist:
1790 l = []
1790 l = []
1791 for t, n in pycompat.iteritems(self.tags()):
1791 for t, n in pycompat.iteritems(self.tags()):
1792 l.append((self.changelog.rev(n), t, n))
1792 l.append((self.changelog.rev(n), t, n))
1793 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1793 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1794
1794
1795 return self._tagscache.tagslist
1795 return self._tagscache.tagslist
1796
1796
1797 def nodetags(self, node):
1797 def nodetags(self, node):
1798 '''return the tags associated with a node'''
1798 '''return the tags associated with a node'''
1799 if not self._tagscache.nodetagscache:
1799 if not self._tagscache.nodetagscache:
1800 nodetagscache = {}
1800 nodetagscache = {}
1801 for t, n in pycompat.iteritems(self._tagscache.tags):
1801 for t, n in pycompat.iteritems(self._tagscache.tags):
1802 nodetagscache.setdefault(n, []).append(t)
1802 nodetagscache.setdefault(n, []).append(t)
1803 for tags in pycompat.itervalues(nodetagscache):
1803 for tags in pycompat.itervalues(nodetagscache):
1804 tags.sort()
1804 tags.sort()
1805 self._tagscache.nodetagscache = nodetagscache
1805 self._tagscache.nodetagscache = nodetagscache
1806 return self._tagscache.nodetagscache.get(node, [])
1806 return self._tagscache.nodetagscache.get(node, [])
1807
1807
1808 def nodebookmarks(self, node):
1808 def nodebookmarks(self, node):
1809 """return the list of bookmarks pointing to the specified node"""
1809 """return the list of bookmarks pointing to the specified node"""
1810 return self._bookmarks.names(node)
1810 return self._bookmarks.names(node)
1811
1811
1812 def branchmap(self):
1812 def branchmap(self):
1813 '''returns a dictionary {branch: [branchheads]} with branchheads
1813 '''returns a dictionary {branch: [branchheads]} with branchheads
1814 ordered by increasing revision number'''
1814 ordered by increasing revision number'''
1815 return self._branchcaches[self]
1815 return self._branchcaches[self]
1816
1816
1817 @unfilteredmethod
1817 @unfilteredmethod
1818 def revbranchcache(self):
1818 def revbranchcache(self):
1819 if not self._revbranchcache:
1819 if not self._revbranchcache:
1820 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1820 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1821 return self._revbranchcache
1821 return self._revbranchcache
1822
1822
1823 def branchtip(self, branch, ignoremissing=False):
1823 def branchtip(self, branch, ignoremissing=False):
1824 '''return the tip node for a given branch
1824 '''return the tip node for a given branch
1825
1825
1826 If ignoremissing is True, then this method will not raise an error.
1826 If ignoremissing is True, then this method will not raise an error.
1827 This is helpful for callers that only expect None for a missing branch
1827 This is helpful for callers that only expect None for a missing branch
1828 (e.g. namespace).
1828 (e.g. namespace).
1829
1829
1830 '''
1830 '''
1831 try:
1831 try:
1832 return self.branchmap().branchtip(branch)
1832 return self.branchmap().branchtip(branch)
1833 except KeyError:
1833 except KeyError:
1834 if not ignoremissing:
1834 if not ignoremissing:
1835 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1835 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1836 else:
1836 else:
1837 pass
1837 pass
1838
1838
1839 def lookup(self, key):
1839 def lookup(self, key):
1840 node = scmutil.revsymbol(self, key).node()
1840 node = scmutil.revsymbol(self, key).node()
1841 if node is None:
1841 if node is None:
1842 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1842 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1843 return node
1843 return node
1844
1844
1845 def lookupbranch(self, key):
1845 def lookupbranch(self, key):
1846 if self.branchmap().hasbranch(key):
1846 if self.branchmap().hasbranch(key):
1847 return key
1847 return key
1848
1848
1849 return scmutil.revsymbol(self, key).branch()
1849 return scmutil.revsymbol(self, key).branch()
1850
1850
1851 def known(self, nodes):
1851 def known(self, nodes):
1852 cl = self.changelog
1852 cl = self.changelog
1853 get_rev = cl.index.get_rev
1853 get_rev = cl.index.get_rev
1854 filtered = cl.filteredrevs
1854 filtered = cl.filteredrevs
1855 result = []
1855 result = []
1856 for n in nodes:
1856 for n in nodes:
1857 r = get_rev(n)
1857 r = get_rev(n)
1858 resp = not (r is None or r in filtered)
1858 resp = not (r is None or r in filtered)
1859 result.append(resp)
1859 result.append(resp)
1860 return result
1860 return result
1861
1861
1862 def local(self):
1862 def local(self):
1863 return self
1863 return self
1864
1864
1865 def publishing(self):
1865 def publishing(self):
1866 # it's safe (and desirable) to trust the publish flag unconditionally
1866 # it's safe (and desirable) to trust the publish flag unconditionally
1867 # so that we don't finalize changes shared between users via ssh or nfs
1867 # so that we don't finalize changes shared between users via ssh or nfs
1868 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1868 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1869
1869
1870 def cancopy(self):
1870 def cancopy(self):
1871 # so statichttprepo's override of local() works
1871 # so statichttprepo's override of local() works
1872 if not self.local():
1872 if not self.local():
1873 return False
1873 return False
1874 if not self.publishing():
1874 if not self.publishing():
1875 return True
1875 return True
1876 # if publishing we can't copy if there is filtered content
1876 # if publishing we can't copy if there is filtered content
1877 return not self.filtered(b'visible').changelog.filteredrevs
1877 return not self.filtered(b'visible').changelog.filteredrevs
1878
1878
1879 def shared(self):
1879 def shared(self):
1880 '''the type of shared repository (None if not shared)'''
1880 '''the type of shared repository (None if not shared)'''
1881 if self.sharedpath != self.path:
1881 if self.sharedpath != self.path:
1882 return b'store'
1882 return b'store'
1883 return None
1883 return None
1884
1884
1885 def wjoin(self, f, *insidef):
1885 def wjoin(self, f, *insidef):
1886 return self.vfs.reljoin(self.root, f, *insidef)
1886 return self.vfs.reljoin(self.root, f, *insidef)
1887
1887
1888 def setparents(self, p1, p2=nullid):
1888 def setparents(self, p1, p2=nullid):
1889 self[None].setparents(p1, p2)
1889 self[None].setparents(p1, p2)
1890
1890
1891 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1891 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1892 """changeid must be a changeset revision, if specified.
1892 """changeid must be a changeset revision, if specified.
1893 fileid can be a file revision or node."""
1893 fileid can be a file revision or node."""
1894 return context.filectx(
1894 return context.filectx(
1895 self, path, changeid, fileid, changectx=changectx
1895 self, path, changeid, fileid, changectx=changectx
1896 )
1896 )
1897
1897
1898 def getcwd(self):
1898 def getcwd(self):
1899 return self.dirstate.getcwd()
1899 return self.dirstate.getcwd()
1900
1900
1901 def pathto(self, f, cwd=None):
1901 def pathto(self, f, cwd=None):
1902 return self.dirstate.pathto(f, cwd)
1902 return self.dirstate.pathto(f, cwd)
1903
1903
1904 def _loadfilter(self, filter):
1904 def _loadfilter(self, filter):
1905 if filter not in self._filterpats:
1905 if filter not in self._filterpats:
1906 l = []
1906 l = []
1907 for pat, cmd in self.ui.configitems(filter):
1907 for pat, cmd in self.ui.configitems(filter):
1908 if cmd == b'!':
1908 if cmd == b'!':
1909 continue
1909 continue
1910 mf = matchmod.match(self.root, b'', [pat])
1910 mf = matchmod.match(self.root, b'', [pat])
1911 fn = None
1911 fn = None
1912 params = cmd
1912 params = cmd
1913 for name, filterfn in pycompat.iteritems(self._datafilters):
1913 for name, filterfn in pycompat.iteritems(self._datafilters):
1914 if cmd.startswith(name):
1914 if cmd.startswith(name):
1915 fn = filterfn
1915 fn = filterfn
1916 params = cmd[len(name) :].lstrip()
1916 params = cmd[len(name) :].lstrip()
1917 break
1917 break
1918 if not fn:
1918 if not fn:
1919 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1919 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1920 fn.__name__ = 'commandfilter'
1920 fn.__name__ = 'commandfilter'
1921 # Wrap old filters not supporting keyword arguments
1921 # Wrap old filters not supporting keyword arguments
1922 if not pycompat.getargspec(fn)[2]:
1922 if not pycompat.getargspec(fn)[2]:
1923 oldfn = fn
1923 oldfn = fn
1924 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1924 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1925 fn.__name__ = 'compat-' + oldfn.__name__
1925 fn.__name__ = 'compat-' + oldfn.__name__
1926 l.append((mf, fn, params))
1926 l.append((mf, fn, params))
1927 self._filterpats[filter] = l
1927 self._filterpats[filter] = l
1928 return self._filterpats[filter]
1928 return self._filterpats[filter]
1929
1929
1930 def _filter(self, filterpats, filename, data):
1930 def _filter(self, filterpats, filename, data):
1931 for mf, fn, cmd in filterpats:
1931 for mf, fn, cmd in filterpats:
1932 if mf(filename):
1932 if mf(filename):
1933 self.ui.debug(
1933 self.ui.debug(
1934 b"filtering %s through %s\n"
1934 b"filtering %s through %s\n"
1935 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1935 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1936 )
1936 )
1937 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1937 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1938 break
1938 break
1939
1939
1940 return data
1940 return data
1941
1941
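# A minimal, standalone sketch of the first-match-wins filter pipeline that
# _loadfilter()/_filter() implement above, using fnmatch in place of
# Mercurial's matcher and plain callables in place of configured shell
# commands. All names below are hypothetical illustrations, not hg API.
import fnmatch

def load_filters(config):
    # config: list of (glob_pattern, filter_callable) pairs, as in [encode]/[decode]
    return [(pat, fn) for pat, fn in config if fn is not None]

def apply_filters(filters, filename, data):
    # mirrors _filter(): only the first matching pattern is applied
    for pat, fn in filters:
        if fnmatch.fnmatch(filename, pat):
            return fn(data)
    return data

filters = load_filters([('*.txt', lambda d: d.replace(b'\r\n', b'\n'))])
assert apply_filters(filters, 'notes.txt', b'a\r\nb\r\n') == b'a\nb\n'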
1942 @unfilteredpropertycache
1942 @unfilteredpropertycache
1943 def _encodefilterpats(self):
1943 def _encodefilterpats(self):
1944 return self._loadfilter(b'encode')
1944 return self._loadfilter(b'encode')
1945
1945
1946 @unfilteredpropertycache
1946 @unfilteredpropertycache
1947 def _decodefilterpats(self):
1947 def _decodefilterpats(self):
1948 return self._loadfilter(b'decode')
1948 return self._loadfilter(b'decode')
1949
1949
1950 def adddatafilter(self, name, filter):
1950 def adddatafilter(self, name, filter):
1951 self._datafilters[name] = filter
1951 self._datafilters[name] = filter
1952
1952
1953 def wread(self, filename):
1953 def wread(self, filename):
1954 if self.wvfs.islink(filename):
1954 if self.wvfs.islink(filename):
1955 data = self.wvfs.readlink(filename)
1955 data = self.wvfs.readlink(filename)
1956 else:
1956 else:
1957 data = self.wvfs.read(filename)
1957 data = self.wvfs.read(filename)
1958 return self._filter(self._encodefilterpats, filename, data)
1958 return self._filter(self._encodefilterpats, filename, data)
1959
1959
1960 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1960 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1961 """write ``data`` into ``filename`` in the working directory
1961 """write ``data`` into ``filename`` in the working directory
1962
1962
1963 This returns the length of the written (maybe decoded) data.
1963 This returns the length of the written (maybe decoded) data.
1964 """
1964 """
1965 data = self._filter(self._decodefilterpats, filename, data)
1965 data = self._filter(self._decodefilterpats, filename, data)
1966 if b'l' in flags:
1966 if b'l' in flags:
1967 self.wvfs.symlink(data, filename)
1967 self.wvfs.symlink(data, filename)
1968 else:
1968 else:
1969 self.wvfs.write(
1969 self.wvfs.write(
1970 filename, data, backgroundclose=backgroundclose, **kwargs
1970 filename, data, backgroundclose=backgroundclose, **kwargs
1971 )
1971 )
1972 if b'x' in flags:
1972 if b'x' in flags:
1973 self.wvfs.setflags(filename, False, True)
1973 self.wvfs.setflags(filename, False, True)
1974 else:
1974 else:
1975 self.wvfs.setflags(filename, False, False)
1975 self.wvfs.setflags(filename, False, False)
1976 return len(data)
1976 return len(data)
1977
1977
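# A simplified, standalone illustration of the flag handling in wwrite():
# 'l' means "write the data as a symlink target", 'x' means "mark the file
# executable". This sketch talks to os directly instead of Mercurial's vfs
# layer and assumes a POSIX filesystem; the function name is hypothetical.
import os
import stat

def write_with_flags(path, data, flags):
    if 'l' in flags:
        os.symlink(data.decode(), path)  # link target comes from the data
    else:
        with open(path, 'wb') as fp:
            fp.write(data)
        mode = os.stat(path).st_mode
        exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        if 'x' in flags:
            os.chmod(path, mode | exec_bits)
        else:
            os.chmod(path, mode & ~exec_bits)
    return len(data)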
1978 def wwritedata(self, filename, data):
1978 def wwritedata(self, filename, data):
1979 return self._filter(self._decodefilterpats, filename, data)
1979 return self._filter(self._decodefilterpats, filename, data)
1980
1980
1981 def currenttransaction(self):
1981 def currenttransaction(self):
1982 """return the current transaction or None if none exists"""
1982 """return the current transaction or None if none exists"""
1983 if self._transref:
1983 if self._transref:
1984 tr = self._transref()
1984 tr = self._transref()
1985 else:
1985 else:
1986 tr = None
1986 tr = None
1987
1987
1988 if tr and tr.running():
1988 if tr and tr.running():
1989 return tr
1989 return tr
1990 return None
1990 return None
1991
1991
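# currenttransaction() holds only a weak reference to the running transaction
# so the repository never keeps a finished transaction object alive. A small
# standalone sketch of that pattern (Tx/Holder are hypothetical stand-ins):
import weakref

class Tx(object):
    def __init__(self):
        self._running = True
    def running(self):
        return self._running

class Holder(object):
    _transref = None
    def start(self):
        tr = Tx()
        self._transref = weakref.ref(tr)
        return tr  # the caller keeps the only strong reference
    def current(self):
        tr = self._transref() if self._transref else None
        return tr if (tr is not None and tr.running()) else None

h = Holder()
tr = h.start()
assert h.current() is tr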
1992 def transaction(self, desc, report=None):
1992 def transaction(self, desc, report=None):
1993 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1993 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1994 b'devel', b'check-locks'
1994 b'devel', b'check-locks'
1995 ):
1995 ):
1996 if self._currentlock(self._lockref) is None:
1996 if self._currentlock(self._lockref) is None:
1997 raise error.ProgrammingError(b'transaction requires locking')
1997 raise error.ProgrammingError(b'transaction requires locking')
1998 tr = self.currenttransaction()
1998 tr = self.currenttransaction()
1999 if tr is not None:
1999 if tr is not None:
2000 return tr.nest(name=desc)
2000 return tr.nest(name=desc)
2001
2001
2002 # abort here if the journal already exists
2002 # abort here if the journal already exists
2003 if self.svfs.exists(b"journal"):
2003 if self.svfs.exists(b"journal"):
2004 raise error.RepoError(
2004 raise error.RepoError(
2005 _(b"abandoned transaction found"),
2005 _(b"abandoned transaction found"),
2006 hint=_(b"run 'hg recover' to clean up transaction"),
2006 hint=_(b"run 'hg recover' to clean up transaction"),
2007 )
2007 )
2008
2008
2009 idbase = b"%.40f#%f" % (random.random(), time.time())
2009 idbase = b"%.40f#%f" % (random.random(), time.time())
2010 ha = hex(hashlib.sha1(idbase).digest())
2010 ha = hex(hashutil.sha1(idbase).digest())
2011 txnid = b'TXN:' + ha
2011 txnid = b'TXN:' + ha
2012 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2012 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2013
2013
2014 self._writejournal(desc)
2014 self._writejournal(desc)
2015 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2015 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2016 if report:
2016 if report:
2017 rp = report
2017 rp = report
2018 else:
2018 else:
2019 rp = self.ui.warn
2019 rp = self.ui.warn
2020 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2020 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2021 # we must avoid cyclic reference between repo and transaction.
2021 # we must avoid cyclic reference between repo and transaction.
2022 reporef = weakref.ref(self)
2022 reporef = weakref.ref(self)
2023 # Code to track tag movement
2023 # Code to track tag movement
2024 #
2024 #
2025 # Since tags are all handled as file content, it is actually quite hard
2025 # Since tags are all handled as file content, it is actually quite hard
2026 # to track these movements from a code perspective. So we fall back to
2026 # to track these movements from a code perspective. So we fall back to
2027 # tracking at the repository level. One could envision tracking changes
2027 # tracking at the repository level. One could envision tracking changes
2028 # to the '.hgtags' file through changegroup apply but that fails to
2028 # to the '.hgtags' file through changegroup apply but that fails to
2029 # cope with cases where a transaction exposes new heads without a changegroup
2029 # cope with cases where a transaction exposes new heads without a changegroup
2030 # being involved (eg: phase movement).
2030 # being involved (eg: phase movement).
2031 #
2031 #
2032 # For now, We gate the feature behind a flag since this likely comes
2032 # For now, We gate the feature behind a flag since this likely comes
2033 # with performance impacts. The current code runs more often than needed
2033 # with performance impacts. The current code runs more often than needed
2034 # and does not use caches as much as it could. The current focus is on
2034 # and does not use caches as much as it could. The current focus is on
2035 # the behavior of the feature so we disable it by default. The flag
2035 # the behavior of the feature so we disable it by default. The flag
2036 # will be removed when we are happy with the performance impact.
2036 # will be removed when we are happy with the performance impact.
2037 #
2037 #
2038 # Once this feature is no longer experimental move the following
2038 # Once this feature is no longer experimental move the following
2039 # documentation to the appropriate help section:
2039 # documentation to the appropriate help section:
2040 #
2040 #
2041 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2041 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2042 # tags (new or changed or deleted tags). In addition the details of
2042 # tags (new or changed or deleted tags). In addition the details of
2043 # these changes are made available in a file at:
2043 # these changes are made available in a file at:
2044 # ``REPOROOT/.hg/changes/tags.changes``.
2044 # ``REPOROOT/.hg/changes/tags.changes``.
2045 # Make sure you check for HG_TAG_MOVED before reading that file as it
2045 # Make sure you check for HG_TAG_MOVED before reading that file as it
2046 # might exist from a previous transaction even if no tag was touched
2046 # might exist from a previous transaction even if no tag was touched
2047 # in this one. Changes are recorded in a line-based format::
2047 # in this one. Changes are recorded in a line-based format::
2048 #
2048 #
2049 # <action> <hex-node> <tag-name>\n
2049 # <action> <hex-node> <tag-name>\n
2050 #
2050 #
2051 # Actions are defined as follows:
2051 # Actions are defined as follows:
2052 # "-R": tag is removed,
2052 # "-R": tag is removed,
2053 # "+A": tag is added,
2053 # "+A": tag is added,
2054 # "-M": tag is moved (old value),
2054 # "-M": tag is moved (old value),
2055 # "+M": tag is moved (new value),
2055 # "+M": tag is moved (new value),
2056 tracktags = lambda x: None
2056 tracktags = lambda x: None
2057 # experimental config: experimental.hook-track-tags
2057 # experimental config: experimental.hook-track-tags
2058 shouldtracktags = self.ui.configbool(
2058 shouldtracktags = self.ui.configbool(
2059 b'experimental', b'hook-track-tags'
2059 b'experimental', b'hook-track-tags'
2060 )
2060 )
2061 if desc != b'strip' and shouldtracktags:
2061 if desc != b'strip' and shouldtracktags:
2062 oldheads = self.changelog.headrevs()
2062 oldheads = self.changelog.headrevs()
2063
2063
2064 def tracktags(tr2):
2064 def tracktags(tr2):
2065 repo = reporef()
2065 repo = reporef()
2066 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2066 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2067 newheads = repo.changelog.headrevs()
2067 newheads = repo.changelog.headrevs()
2068 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2068 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2069 # notes: we compare lists here.
2069 # notes: we compare lists here.
2070 # As we do it only once, building a set would not be cheaper
2070 # As we do it only once, building a set would not be cheaper
2071 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2071 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2072 if changes:
2072 if changes:
2073 tr2.hookargs[b'tag_moved'] = b'1'
2073 tr2.hookargs[b'tag_moved'] = b'1'
2074 with repo.vfs(
2074 with repo.vfs(
2075 b'changes/tags.changes', b'w', atomictemp=True
2075 b'changes/tags.changes', b'w', atomictemp=True
2076 ) as changesfile:
2076 ) as changesfile:
2077 # note: we do not register the file to the transaction
2077 # note: we do not register the file to the transaction
2078 # because we need it to still exist when the transaction
2078 # because we need it to still exist when the transaction
2079 # is closed (for txnclose hooks)
2079 # is closed (for txnclose hooks)
2080 tagsmod.writediff(changesfile, changes)
2080 tagsmod.writediff(changesfile, changes)
2081
2081
2082 def validate(tr2):
2082 def validate(tr2):
2083 """will run pre-closing hooks"""
2083 """will run pre-closing hooks"""
2084 # XXX the transaction API is a bit lacking here so we take a hacky
2084 # XXX the transaction API is a bit lacking here so we take a hacky
2085 # path for now
2085 # path for now
2086 #
2086 #
2087 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2087 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2088 # dict is copied before these run. In addition we need the data
2088 # dict is copied before these run. In addition we need the data
2089 # available to in-memory hooks too.
2089 # available to in-memory hooks too.
2090 #
2090 #
2091 # Moreover, we also need to make sure this runs before txnclose
2091 # Moreover, we also need to make sure this runs before txnclose
2092 # hooks and there is no "pending" mechanism that would execute
2092 # hooks and there is no "pending" mechanism that would execute
2093 # logic only if hooks are about to run.
2093 # logic only if hooks are about to run.
2094 #
2094 #
2095 # Fixing this limitation of the transaction is also needed to track
2095 # Fixing this limitation of the transaction is also needed to track
2096 # other families of changes (bookmarks, phases, obsolescence).
2096 # other families of changes (bookmarks, phases, obsolescence).
2097 #
2097 #
2098 # This will have to be fixed before we remove the experimental
2098 # This will have to be fixed before we remove the experimental
2099 # gating.
2099 # gating.
2100 tracktags(tr2)
2100 tracktags(tr2)
2101 repo = reporef()
2101 repo = reporef()
2102
2102
2103 singleheadopt = (b'experimental', b'single-head-per-branch')
2103 singleheadopt = (b'experimental', b'single-head-per-branch')
2104 singlehead = repo.ui.configbool(*singleheadopt)
2104 singlehead = repo.ui.configbool(*singleheadopt)
2105 if singlehead:
2105 if singlehead:
2106 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2106 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2107 accountclosed = singleheadsub.get(
2107 accountclosed = singleheadsub.get(
2108 b"account-closed-heads", False
2108 b"account-closed-heads", False
2109 )
2109 )
2110 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2110 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2111 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2111 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2112 for name, (old, new) in sorted(
2112 for name, (old, new) in sorted(
2113 tr.changes[b'bookmarks'].items()
2113 tr.changes[b'bookmarks'].items()
2114 ):
2114 ):
2115 args = tr.hookargs.copy()
2115 args = tr.hookargs.copy()
2116 args.update(bookmarks.preparehookargs(name, old, new))
2116 args.update(bookmarks.preparehookargs(name, old, new))
2117 repo.hook(
2117 repo.hook(
2118 b'pretxnclose-bookmark',
2118 b'pretxnclose-bookmark',
2119 throw=True,
2119 throw=True,
2120 **pycompat.strkwargs(args)
2120 **pycompat.strkwargs(args)
2121 )
2121 )
2122 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2122 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2123 cl = repo.unfiltered().changelog
2123 cl = repo.unfiltered().changelog
2124 for rev, (old, new) in tr.changes[b'phases'].items():
2124 for rev, (old, new) in tr.changes[b'phases'].items():
2125 args = tr.hookargs.copy()
2125 args = tr.hookargs.copy()
2126 node = hex(cl.node(rev))
2126 node = hex(cl.node(rev))
2127 args.update(phases.preparehookargs(node, old, new))
2127 args.update(phases.preparehookargs(node, old, new))
2128 repo.hook(
2128 repo.hook(
2129 b'pretxnclose-phase',
2129 b'pretxnclose-phase',
2130 throw=True,
2130 throw=True,
2131 **pycompat.strkwargs(args)
2131 **pycompat.strkwargs(args)
2132 )
2132 )
2133
2133
2134 repo.hook(
2134 repo.hook(
2135 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2135 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2136 )
2136 )
2137
2137
2138 def releasefn(tr, success):
2138 def releasefn(tr, success):
2139 repo = reporef()
2139 repo = reporef()
2140 if repo is None:
2140 if repo is None:
2141 # If the repo has been GC'd (and this release function is being
2141 # If the repo has been GC'd (and this release function is being
2142 # called from transaction.__del__), there's not much we can do,
2142 # called from transaction.__del__), there's not much we can do,
2143 # so just leave the unfinished transaction there and let the
2143 # so just leave the unfinished transaction there and let the
2144 # user run `hg recover`.
2144 # user run `hg recover`.
2145 return
2145 return
2146 if success:
2146 if success:
2147 # this should be explicitly invoked here, because
2147 # this should be explicitly invoked here, because
2148 # in-memory changes aren't written out at closing
2148 # in-memory changes aren't written out at closing
2149 # transaction, if tr.addfilegenerator (via
2149 # transaction, if tr.addfilegenerator (via
2150 # dirstate.write or so) isn't invoked while
2150 # dirstate.write or so) isn't invoked while
2151 # transaction running
2151 # transaction running
2152 repo.dirstate.write(None)
2152 repo.dirstate.write(None)
2153 else:
2153 else:
2154 # discard all changes (including ones already written
2154 # discard all changes (including ones already written
2155 # out) in this transaction
2155 # out) in this transaction
2156 narrowspec.restorebackup(self, b'journal.narrowspec')
2156 narrowspec.restorebackup(self, b'journal.narrowspec')
2157 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2157 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2158 repo.dirstate.restorebackup(None, b'journal.dirstate')
2158 repo.dirstate.restorebackup(None, b'journal.dirstate')
2159
2159
2160 repo.invalidate(clearfilecache=True)
2160 repo.invalidate(clearfilecache=True)
2161
2161
2162 tr = transaction.transaction(
2162 tr = transaction.transaction(
2163 rp,
2163 rp,
2164 self.svfs,
2164 self.svfs,
2165 vfsmap,
2165 vfsmap,
2166 b"journal",
2166 b"journal",
2167 b"undo",
2167 b"undo",
2168 aftertrans(renames),
2168 aftertrans(renames),
2169 self.store.createmode,
2169 self.store.createmode,
2170 validator=validate,
2170 validator=validate,
2171 releasefn=releasefn,
2171 releasefn=releasefn,
2172 checkambigfiles=_cachedfiles,
2172 checkambigfiles=_cachedfiles,
2173 name=desc,
2173 name=desc,
2174 )
2174 )
2175 tr.changes[b'origrepolen'] = len(self)
2175 tr.changes[b'origrepolen'] = len(self)
2176 tr.changes[b'obsmarkers'] = set()
2176 tr.changes[b'obsmarkers'] = set()
2177 tr.changes[b'phases'] = {}
2177 tr.changes[b'phases'] = {}
2178 tr.changes[b'bookmarks'] = {}
2178 tr.changes[b'bookmarks'] = {}
2179
2179
2180 tr.hookargs[b'txnid'] = txnid
2180 tr.hookargs[b'txnid'] = txnid
2181 tr.hookargs[b'txnname'] = desc
2181 tr.hookargs[b'txnname'] = desc
2182 # note: writing the fncache only during finalize means that the file is
2182 # note: writing the fncache only during finalize means that the file is
2183 # outdated when running hooks. As fncache is used for streaming clone,
2183 # outdated when running hooks. As fncache is used for streaming clone,
2184 # this is not expected to break anything that happens during the hooks.
2184 # this is not expected to break anything that happens during the hooks.
2185 tr.addfinalize(b'flush-fncache', self.store.write)
2185 tr.addfinalize(b'flush-fncache', self.store.write)
2186
2186
2187 def txnclosehook(tr2):
2187 def txnclosehook(tr2):
2188 """To be run if transaction is successful, will schedule a hook run
2188 """To be run if transaction is successful, will schedule a hook run
2189 """
2189 """
2190 # Don't reference tr2 in hook() so we don't hold a reference.
2190 # Don't reference tr2 in hook() so we don't hold a reference.
2191 # This reduces memory consumption when there are multiple
2191 # This reduces memory consumption when there are multiple
2192 # transactions per lock. This can likely go away if issue5045
2192 # transactions per lock. This can likely go away if issue5045
2193 # fixes the function accumulation.
2193 # fixes the function accumulation.
2194 hookargs = tr2.hookargs
2194 hookargs = tr2.hookargs
2195
2195
2196 def hookfunc(unused_success):
2196 def hookfunc(unused_success):
2197 repo = reporef()
2197 repo = reporef()
2198 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2198 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2199 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2199 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2200 for name, (old, new) in bmchanges:
2200 for name, (old, new) in bmchanges:
2201 args = tr.hookargs.copy()
2201 args = tr.hookargs.copy()
2202 args.update(bookmarks.preparehookargs(name, old, new))
2202 args.update(bookmarks.preparehookargs(name, old, new))
2203 repo.hook(
2203 repo.hook(
2204 b'txnclose-bookmark',
2204 b'txnclose-bookmark',
2205 throw=False,
2205 throw=False,
2206 **pycompat.strkwargs(args)
2206 **pycompat.strkwargs(args)
2207 )
2207 )
2208
2208
2209 if hook.hashook(repo.ui, b'txnclose-phase'):
2209 if hook.hashook(repo.ui, b'txnclose-phase'):
2210 cl = repo.unfiltered().changelog
2210 cl = repo.unfiltered().changelog
2211 phasemv = sorted(tr.changes[b'phases'].items())
2211 phasemv = sorted(tr.changes[b'phases'].items())
2212 for rev, (old, new) in phasemv:
2212 for rev, (old, new) in phasemv:
2213 args = tr.hookargs.copy()
2213 args = tr.hookargs.copy()
2214 node = hex(cl.node(rev))
2214 node = hex(cl.node(rev))
2215 args.update(phases.preparehookargs(node, old, new))
2215 args.update(phases.preparehookargs(node, old, new))
2216 repo.hook(
2216 repo.hook(
2217 b'txnclose-phase',
2217 b'txnclose-phase',
2218 throw=False,
2218 throw=False,
2219 **pycompat.strkwargs(args)
2219 **pycompat.strkwargs(args)
2220 )
2220 )
2221
2221
2222 repo.hook(
2222 repo.hook(
2223 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2223 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2224 )
2224 )
2225
2225
2226 reporef()._afterlock(hookfunc)
2226 reporef()._afterlock(hookfunc)
2227
2227
2228 tr.addfinalize(b'txnclose-hook', txnclosehook)
2228 tr.addfinalize(b'txnclose-hook', txnclosehook)
2229 # Include a leading "-" to make it happen before the transaction summary
2229 # Include a leading "-" to make it happen before the transaction summary
2230 # reports registered via scmutil.registersummarycallback() whose names
2230 # reports registered via scmutil.registersummarycallback() whose names
2231 # are 00-txnreport etc. That way, the caches will be warm when the
2231 # are 00-txnreport etc. That way, the caches will be warm when the
2232 # callbacks run.
2232 # callbacks run.
2233 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2233 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2234
2234
2235 def txnaborthook(tr2):
2235 def txnaborthook(tr2):
2236 """To be run if transaction is aborted
2236 """To be run if transaction is aborted
2237 """
2237 """
2238 reporef().hook(
2238 reporef().hook(
2239 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2239 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2240 )
2240 )
2241
2241
2242 tr.addabort(b'txnabort-hook', txnaborthook)
2242 tr.addabort(b'txnabort-hook', txnaborthook)
2243 # avoid eager cache invalidation. in-memory data should be identical
2243 # avoid eager cache invalidation. in-memory data should be identical
2244 # to stored data if transaction has no error.
2244 # to stored data if transaction has no error.
2245 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2245 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2246 self._transref = weakref.ref(tr)
2246 self._transref = weakref.ref(tr)
2247 scmutil.registersummarycallback(self, tr, desc)
2247 scmutil.registersummarycallback(self, tr, desc)
2248 return tr
2248 return tr
2249
2249
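# The hunk above is the point of this changeset: the transaction id is now
# hashed through hashutil.sha1 instead of hashlib.sha1. Roughly -- this is a
# sketch, not the verbatim hashutil module -- the wrapper prefers the
# collision-detecting sha1dc implementation and falls back to hashlib when it
# is unavailable; the txnid construction itself is unchanged.
import hashlib
import random
import time

try:
    from mercurial.thirdparty import sha1dc  # assumed layout; may be absent
    sha1 = sha1dc.sha1
except Exception:
    sha1 = hashlib.sha1

idbase = ("%.40f#%f" % (random.random(), time.time())).encode('ascii')
txnid = b'TXN:' + sha1(idbase).hexdigest().encode('ascii')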
2250 def _journalfiles(self):
2250 def _journalfiles(self):
2251 return (
2251 return (
2252 (self.svfs, b'journal'),
2252 (self.svfs, b'journal'),
2253 (self.svfs, b'journal.narrowspec'),
2253 (self.svfs, b'journal.narrowspec'),
2254 (self.vfs, b'journal.narrowspec.dirstate'),
2254 (self.vfs, b'journal.narrowspec.dirstate'),
2255 (self.vfs, b'journal.dirstate'),
2255 (self.vfs, b'journal.dirstate'),
2256 (self.vfs, b'journal.branch'),
2256 (self.vfs, b'journal.branch'),
2257 (self.vfs, b'journal.desc'),
2257 (self.vfs, b'journal.desc'),
2258 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2258 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2259 (self.svfs, b'journal.phaseroots'),
2259 (self.svfs, b'journal.phaseroots'),
2260 )
2260 )
2261
2261
2262 def undofiles(self):
2262 def undofiles(self):
2263 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2263 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2264
2264
2265 @unfilteredmethod
2265 @unfilteredmethod
2266 def _writejournal(self, desc):
2266 def _writejournal(self, desc):
2267 self.dirstate.savebackup(None, b'journal.dirstate')
2267 self.dirstate.savebackup(None, b'journal.dirstate')
2268 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2268 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2269 narrowspec.savebackup(self, b'journal.narrowspec')
2269 narrowspec.savebackup(self, b'journal.narrowspec')
2270 self.vfs.write(
2270 self.vfs.write(
2271 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2271 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2272 )
2272 )
2273 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2273 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2274 bookmarksvfs = bookmarks.bookmarksvfs(self)
2274 bookmarksvfs = bookmarks.bookmarksvfs(self)
2275 bookmarksvfs.write(
2275 bookmarksvfs.write(
2276 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2276 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2277 )
2277 )
2278 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2278 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2279
2279
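# _writejournal() snapshots dirstate, branch, bookmarks and phase data into
# journal.* files; when the transaction closes successfully, aftertrans()
# renames them to undo.* so a later `hg rollback` can restore them. A
# simplified sketch of that journal -> undo promotion using plain files
# (the directory layout and file names here are illustrative only):
import os

def undo_name(name):
    # journal.dirstate -> undo.dirstate, journal.branch -> undo.branch, ...
    assert name.startswith('journal')
    return 'undo' + name[len('journal'):]

def promote_journal(directory, names=('journal.dirstate', 'journal.branch')):
    for name in names:
        src = os.path.join(directory, name)
        if os.path.exists(src):
            os.replace(src, os.path.join(directory, undo_name(name)))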
2280 def recover(self):
2280 def recover(self):
2281 with self.lock():
2281 with self.lock():
2282 if self.svfs.exists(b"journal"):
2282 if self.svfs.exists(b"journal"):
2283 self.ui.status(_(b"rolling back interrupted transaction\n"))
2283 self.ui.status(_(b"rolling back interrupted transaction\n"))
2284 vfsmap = {
2284 vfsmap = {
2285 b'': self.svfs,
2285 b'': self.svfs,
2286 b'plain': self.vfs,
2286 b'plain': self.vfs,
2287 }
2287 }
2288 transaction.rollback(
2288 transaction.rollback(
2289 self.svfs,
2289 self.svfs,
2290 vfsmap,
2290 vfsmap,
2291 b"journal",
2291 b"journal",
2292 self.ui.warn,
2292 self.ui.warn,
2293 checkambigfiles=_cachedfiles,
2293 checkambigfiles=_cachedfiles,
2294 )
2294 )
2295 self.invalidate()
2295 self.invalidate()
2296 return True
2296 return True
2297 else:
2297 else:
2298 self.ui.warn(_(b"no interrupted transaction available\n"))
2298 self.ui.warn(_(b"no interrupted transaction available\n"))
2299 return False
2299 return False
2300
2300
2301 def rollback(self, dryrun=False, force=False):
2301 def rollback(self, dryrun=False, force=False):
2302 wlock = lock = dsguard = None
2302 wlock = lock = dsguard = None
2303 try:
2303 try:
2304 wlock = self.wlock()
2304 wlock = self.wlock()
2305 lock = self.lock()
2305 lock = self.lock()
2306 if self.svfs.exists(b"undo"):
2306 if self.svfs.exists(b"undo"):
2307 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2307 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2308
2308
2309 return self._rollback(dryrun, force, dsguard)
2309 return self._rollback(dryrun, force, dsguard)
2310 else:
2310 else:
2311 self.ui.warn(_(b"no rollback information available\n"))
2311 self.ui.warn(_(b"no rollback information available\n"))
2312 return 1
2312 return 1
2313 finally:
2313 finally:
2314 release(dsguard, lock, wlock)
2314 release(dsguard, lock, wlock)
2315
2315
2316 @unfilteredmethod # Until we get smarter cache management
2316 @unfilteredmethod # Until we get smarter cache management
2317 def _rollback(self, dryrun, force, dsguard):
2317 def _rollback(self, dryrun, force, dsguard):
2318 ui = self.ui
2318 ui = self.ui
2319 try:
2319 try:
2320 args = self.vfs.read(b'undo.desc').splitlines()
2320 args = self.vfs.read(b'undo.desc').splitlines()
2321 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2321 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2322 if len(args) >= 3:
2322 if len(args) >= 3:
2323 detail = args[2]
2323 detail = args[2]
2324 oldtip = oldlen - 1
2324 oldtip = oldlen - 1
2325
2325
2326 if detail and ui.verbose:
2326 if detail and ui.verbose:
2327 msg = _(
2327 msg = _(
2328 b'repository tip rolled back to revision %d'
2328 b'repository tip rolled back to revision %d'
2329 b' (undo %s: %s)\n'
2329 b' (undo %s: %s)\n'
2330 ) % (oldtip, desc, detail)
2330 ) % (oldtip, desc, detail)
2331 else:
2331 else:
2332 msg = _(
2332 msg = _(
2333 b'repository tip rolled back to revision %d (undo %s)\n'
2333 b'repository tip rolled back to revision %d (undo %s)\n'
2334 ) % (oldtip, desc)
2334 ) % (oldtip, desc)
2335 except IOError:
2335 except IOError:
2336 msg = _(b'rolling back unknown transaction\n')
2336 msg = _(b'rolling back unknown transaction\n')
2337 desc = None
2337 desc = None
2338
2338
2339 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2339 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2340 raise error.Abort(
2340 raise error.Abort(
2341 _(
2341 _(
2342 b'rollback of last commit while not checked out '
2342 b'rollback of last commit while not checked out '
2343 b'may lose data'
2343 b'may lose data'
2344 ),
2344 ),
2345 hint=_(b'use -f to force'),
2345 hint=_(b'use -f to force'),
2346 )
2346 )
2347
2347
2348 ui.status(msg)
2348 ui.status(msg)
2349 if dryrun:
2349 if dryrun:
2350 return 0
2350 return 0
2351
2351
2352 parents = self.dirstate.parents()
2352 parents = self.dirstate.parents()
2353 self.destroying()
2353 self.destroying()
2354 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2354 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2355 transaction.rollback(
2355 transaction.rollback(
2356 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2356 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2357 )
2357 )
2358 bookmarksvfs = bookmarks.bookmarksvfs(self)
2358 bookmarksvfs = bookmarks.bookmarksvfs(self)
2359 if bookmarksvfs.exists(b'undo.bookmarks'):
2359 if bookmarksvfs.exists(b'undo.bookmarks'):
2360 bookmarksvfs.rename(
2360 bookmarksvfs.rename(
2361 b'undo.bookmarks', b'bookmarks', checkambig=True
2361 b'undo.bookmarks', b'bookmarks', checkambig=True
2362 )
2362 )
2363 if self.svfs.exists(b'undo.phaseroots'):
2363 if self.svfs.exists(b'undo.phaseroots'):
2364 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2364 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2365 self.invalidate()
2365 self.invalidate()
2366
2366
2367 has_node = self.changelog.index.has_node
2367 has_node = self.changelog.index.has_node
2368 parentgone = any(not has_node(p) for p in parents)
2368 parentgone = any(not has_node(p) for p in parents)
2369 if parentgone:
2369 if parentgone:
2370 # prevent dirstateguard from overwriting already restored one
2370 # prevent dirstateguard from overwriting already restored one
2371 dsguard.close()
2371 dsguard.close()
2372
2372
2373 narrowspec.restorebackup(self, b'undo.narrowspec')
2373 narrowspec.restorebackup(self, b'undo.narrowspec')
2374 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2374 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2375 self.dirstate.restorebackup(None, b'undo.dirstate')
2375 self.dirstate.restorebackup(None, b'undo.dirstate')
2376 try:
2376 try:
2377 branch = self.vfs.read(b'undo.branch')
2377 branch = self.vfs.read(b'undo.branch')
2378 self.dirstate.setbranch(encoding.tolocal(branch))
2378 self.dirstate.setbranch(encoding.tolocal(branch))
2379 except IOError:
2379 except IOError:
2380 ui.warn(
2380 ui.warn(
2381 _(
2381 _(
2382 b'named branch could not be reset: '
2382 b'named branch could not be reset: '
2383 b'current branch is still \'%s\'\n'
2383 b'current branch is still \'%s\'\n'
2384 )
2384 )
2385 % self.dirstate.branch()
2385 % self.dirstate.branch()
2386 )
2386 )
2387
2387
2388 parents = tuple([p.rev() for p in self[None].parents()])
2388 parents = tuple([p.rev() for p in self[None].parents()])
2389 if len(parents) > 1:
2389 if len(parents) > 1:
2390 ui.status(
2390 ui.status(
2391 _(
2391 _(
2392 b'working directory now based on '
2392 b'working directory now based on '
2393 b'revisions %d and %d\n'
2393 b'revisions %d and %d\n'
2394 )
2394 )
2395 % parents
2395 % parents
2396 )
2396 )
2397 else:
2397 else:
2398 ui.status(
2398 ui.status(
2399 _(b'working directory now based on revision %d\n') % parents
2399 _(b'working directory now based on revision %d\n') % parents
2400 )
2400 )
2401 mergemod.mergestate.clean(self, self[b'.'].node())
2401 mergemod.mergestate.clean(self, self[b'.'].node())
2402
2402
2403 # TODO: if we know which new heads may result from this rollback, pass
2403 # TODO: if we know which new heads may result from this rollback, pass
2404 # them to destroy(), which will prevent the branchhead cache from being
2404 # them to destroy(), which will prevent the branchhead cache from being
2405 # invalidated.
2405 # invalidated.
2406 self.destroyed()
2406 self.destroyed()
2407 return 0
2407 return 0
2408
2408
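# _rollback() above parses undo.desc, which _writejournal() produced with
# b"%d\n%s\n" % (len(self), desc); an optional third line carries extra
# detail. A standalone sketch of that parse (the function name is hypothetical):
def parse_undo_desc(blob):
    args = blob.splitlines()
    oldlen, desc = int(args[0]), args[1]
    detail = args[2] if len(args) >= 3 else None
    return oldlen, desc, detail

assert parse_undo_desc(b"42\ncommit\n") == (42, b'commit', None)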
2409 def _buildcacheupdater(self, newtransaction):
2409 def _buildcacheupdater(self, newtransaction):
2410 """called during transaction to build the callback updating cache
2410 """called during transaction to build the callback updating cache
2411
2411
2412 Lives on the repository to help extensions that might want to augment
2412 Lives on the repository to help extensions that might want to augment
2413 this logic. For this purpose, the created transaction is passed to the
2413 this logic. For this purpose, the created transaction is passed to the
2414 method.
2414 method.
2415 """
2415 """
2416 # we must avoid cyclic reference between repo and transaction.
2416 # we must avoid cyclic reference between repo and transaction.
2417 reporef = weakref.ref(self)
2417 reporef = weakref.ref(self)
2418
2418
2419 def updater(tr):
2419 def updater(tr):
2420 repo = reporef()
2420 repo = reporef()
2421 repo.updatecaches(tr)
2421 repo.updatecaches(tr)
2422
2422
2423 return updater
2423 return updater
2424
2424
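# The leading "-" in the b'-warm-cache' category above matters because
# post-close callbacks run in sorted-by-name order and "-" sorts before "0",
# so cache warming happens before the 00-txnreport summary callbacks. A toy
# sketch of that ordering idea (MiniTransaction is not the real class):
class MiniTransaction(object):
    def __init__(self):
        self._postclose = {}
    def addpostclose(self, category, callback):
        self._postclose[category] = callback
    def close(self):
        for category in sorted(self._postclose):
            self._postclose[category](self)

tr = MiniTransaction()
order = []
tr.addpostclose(b'00-txnreport', lambda tr: order.append('report'))
tr.addpostclose(b'-warm-cache', lambda tr: order.append('warm'))
tr.close()
assert order == ['warm', 'report']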
2425 @unfilteredmethod
2425 @unfilteredmethod
2426 def updatecaches(self, tr=None, full=False):
2426 def updatecaches(self, tr=None, full=False):
2427 """warm appropriate caches
2427 """warm appropriate caches
2428
2428
2429 If this function is called after a transaction has closed, the transaction
2429 If this function is called after a transaction has closed, the transaction
2430 will be available in the 'tr' argument. This can be used to selectively
2430 will be available in the 'tr' argument. This can be used to selectively
2431 update caches relevant to the changes in that transaction.
2431 update caches relevant to the changes in that transaction.
2432
2432
2433 If 'full' is set, make sure all caches the function knows about have
2433 If 'full' is set, make sure all caches the function knows about have
2434 up-to-date data. Even the ones usually loaded more lazily.
2434 up-to-date data. Even the ones usually loaded more lazily.
2435 """
2435 """
2436 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2436 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2437 # During strip, many caches are invalid but
2437 # During strip, many caches are invalid but
2438 # a later call to `destroyed` will refresh them.
2438 # a later call to `destroyed` will refresh them.
2439 return
2439 return
2440
2440
2441 if tr is None or tr.changes[b'origrepolen'] < len(self):
2441 if tr is None or tr.changes[b'origrepolen'] < len(self):
2442 # accessing the 'served' branchmap should refresh all the others,
2442 # accessing the 'served' branchmap should refresh all the others,
2443 self.ui.debug(b'updating the branch cache\n')
2443 self.ui.debug(b'updating the branch cache\n')
2444 self.filtered(b'served').branchmap()
2444 self.filtered(b'served').branchmap()
2445 self.filtered(b'served.hidden').branchmap()
2445 self.filtered(b'served.hidden').branchmap()
2446
2446
2447 if full:
2447 if full:
2448 unfi = self.unfiltered()
2448 unfi = self.unfiltered()
2449 rbc = unfi.revbranchcache()
2449 rbc = unfi.revbranchcache()
2450 for r in unfi.changelog:
2450 for r in unfi.changelog:
2451 rbc.branchinfo(r)
2451 rbc.branchinfo(r)
2452 rbc.write()
2452 rbc.write()
2453
2453
2454 # ensure the working copy parents are in the manifestfulltextcache
2454 # ensure the working copy parents are in the manifestfulltextcache
2455 for ctx in self[b'.'].parents():
2455 for ctx in self[b'.'].parents():
2456 ctx.manifest() # accessing the manifest is enough
2456 ctx.manifest() # accessing the manifest is enough
2457
2457
2458 # accessing fnode cache warms the cache
2458 # accessing fnode cache warms the cache
2459 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2459 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2460 # accessing tags warm the cache
2460 # accessing tags warm the cache
2461 self.tags()
2461 self.tags()
2462 self.filtered(b'served').tags()
2462 self.filtered(b'served').tags()
2463
2463
2464 # The `full` arg is documented as updating even the lazily-loaded
2464 # The `full` arg is documented as updating even the lazily-loaded
2465 # caches immediately, so we're forcing a write to cause these caches
2465 # caches immediately, so we're forcing a write to cause these caches
2466 # to be warmed up even if they haven't explicitly been requested
2466 # to be warmed up even if they haven't explicitly been requested
2467 # yet (if they've never been used by hg, they won't ever have been
2467 # yet (if they've never been used by hg, they won't ever have been
2468 # written, even if they're a subset of another kind of cache that
2468 # written, even if they're a subset of another kind of cache that
2469 # *has* been used).
2469 # *has* been used).
2470 for filt in repoview.filtertable.keys():
2470 for filt in repoview.filtertable.keys():
2471 filtered = self.filtered(filt)
2471 filtered = self.filtered(filt)
2472 filtered.branchmap().write(filtered)
2472 filtered.branchmap().write(filtered)
2473
2473
2474 def invalidatecaches(self):
2474 def invalidatecaches(self):
2475
2475
2476 if '_tagscache' in vars(self):
2476 if '_tagscache' in vars(self):
2477 # can't use delattr on proxy
2477 # can't use delattr on proxy
2478 del self.__dict__['_tagscache']
2478 del self.__dict__['_tagscache']
2479
2479
2480 self._branchcaches.clear()
2480 self._branchcaches.clear()
2481 self.invalidatevolatilesets()
2481 self.invalidatevolatilesets()
2482 self._sparsesignaturecache.clear()
2482 self._sparsesignaturecache.clear()
2483
2483
2484 def invalidatevolatilesets(self):
2484 def invalidatevolatilesets(self):
2485 self.filteredrevcache.clear()
2485 self.filteredrevcache.clear()
2486 obsolete.clearobscaches(self)
2486 obsolete.clearobscaches(self)
2487
2487
2488 def invalidatedirstate(self):
2488 def invalidatedirstate(self):
2489 '''Invalidates the dirstate, causing the next call to dirstate
2489 '''Invalidates the dirstate, causing the next call to dirstate
2490 to check if it was modified since the last time it was read,
2490 to check if it was modified since the last time it was read,
2491 rereading it if it has.
2491 rereading it if it has.
2492
2492
2493 This is different from dirstate.invalidate() in that it doesn't always
2493 This is different from dirstate.invalidate() in that it doesn't always
2494 reread the dirstate. Use dirstate.invalidate() if you want to
2494 reread the dirstate. Use dirstate.invalidate() if you want to
2495 explicitly read the dirstate again (i.e. restoring it to a previous
2495 explicitly read the dirstate again (i.e. restoring it to a previous
2496 known good state).'''
2496 known good state).'''
2497 if hasunfilteredcache(self, 'dirstate'):
2497 if hasunfilteredcache(self, 'dirstate'):
2498 for k in self.dirstate._filecache:
2498 for k in self.dirstate._filecache:
2499 try:
2499 try:
2500 delattr(self.dirstate, k)
2500 delattr(self.dirstate, k)
2501 except AttributeError:
2501 except AttributeError:
2502 pass
2502 pass
2503 delattr(self.unfiltered(), 'dirstate')
2503 delattr(self.unfiltered(), 'dirstate')
2504
2504
2505 def invalidate(self, clearfilecache=False):
2505 def invalidate(self, clearfilecache=False):
2506 '''Invalidates both store and non-store parts other than dirstate
2506 '''Invalidates both store and non-store parts other than dirstate
2507
2507
2508 If a transaction is running, invalidation of store is omitted,
2508 If a transaction is running, invalidation of store is omitted,
2509 because discarding in-memory changes might cause inconsistency
2509 because discarding in-memory changes might cause inconsistency
2510 (e.g. incomplete fncache causes unintentional failure, but
2510 (e.g. incomplete fncache causes unintentional failure, but
2511 redundant one doesn't).
2511 redundant one doesn't).
2512 '''
2512 '''
2513 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2513 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2514 for k in list(self._filecache.keys()):
2514 for k in list(self._filecache.keys()):
2515 # dirstate is invalidated separately in invalidatedirstate()
2515 # dirstate is invalidated separately in invalidatedirstate()
2516 if k == b'dirstate':
2516 if k == b'dirstate':
2517 continue
2517 continue
2518 if (
2518 if (
2519 k == b'changelog'
2519 k == b'changelog'
2520 and self.currenttransaction()
2520 and self.currenttransaction()
2521 and self.changelog._delayed
2521 and self.changelog._delayed
2522 ):
2522 ):
2523 # The changelog object may store unwritten revisions. We don't
2523 # The changelog object may store unwritten revisions. We don't
2524 # want to lose them.
2524 # want to lose them.
2525 # TODO: Solve the problem instead of working around it.
2525 # TODO: Solve the problem instead of working around it.
2526 continue
2526 continue
2527
2527
2528 if clearfilecache:
2528 if clearfilecache:
2529 del self._filecache[k]
2529 del self._filecache[k]
2530 try:
2530 try:
2531 delattr(unfiltered, k)
2531 delattr(unfiltered, k)
2532 except AttributeError:
2532 except AttributeError:
2533 pass
2533 pass
2534 self.invalidatecaches()
2534 self.invalidatecaches()
2535 if not self.currenttransaction():
2535 if not self.currenttransaction():
2536 # TODO: Changing contents of store outside transaction
2536 # TODO: Changing contents of store outside transaction
2537 # causes inconsistency. We should make in-memory store
2537 # causes inconsistency. We should make in-memory store
2538 # changes detectable, and abort if changed.
2538 # changes detectable, and abort if changed.
2539 self.store.invalidatecaches()
2539 self.store.invalidatecaches()
2540
2540
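# invalidate() works by deleting cached attributes so the next access
# recomputes (and re-reads) them. A minimal sketch of that cache-as-instance-
# attribute pattern; this propertycache is a simplified stand-in for hg's,
# and Repo/changelog here are illustrative names only:
class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        value = self.func(obj)
        obj.__dict__[self.name] = value  # cache on the instance
        return value

class Repo(object):
    reads = 0
    @propertycache
    def changelog(self):
        Repo.reads += 1
        return 'changelog@%d' % Repo.reads
    def invalidate(self):
        self.__dict__.pop('changelog', None)  # like delattr(unfiltered, k)

r = Repo()
assert r.changelog == r.changelog  # second access hits the cache
r.invalidate()
assert r.changelog == 'changelog@2'  # recomputed after invalidation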
2541 def invalidateall(self):
2541 def invalidateall(self):
2542 '''Fully invalidates both store and non-store parts, causing the
2542 '''Fully invalidates both store and non-store parts, causing the
2543 subsequent operation to reread any outside changes.'''
2543 subsequent operation to reread any outside changes.'''
2544 # extension should hook this to invalidate its caches
2544 # extension should hook this to invalidate its caches
2545 self.invalidate()
2545 self.invalidate()
2546 self.invalidatedirstate()
2546 self.invalidatedirstate()
2547
2547
2548 @unfilteredmethod
2548 @unfilteredmethod
2549 def _refreshfilecachestats(self, tr):
2549 def _refreshfilecachestats(self, tr):
2550 """Reload stats of cached files so that they are flagged as valid"""
2550 """Reload stats of cached files so that they are flagged as valid"""
2551 for k, ce in self._filecache.items():
2551 for k, ce in self._filecache.items():
2552 k = pycompat.sysstr(k)
2552 k = pycompat.sysstr(k)
2553 if k == 'dirstate' or k not in self.__dict__:
2553 if k == 'dirstate' or k not in self.__dict__:
2554 continue
2554 continue
2555 ce.refresh()
2555 ce.refresh()
2556
2556
2557 def _lock(
2557 def _lock(
2558 self,
2558 self,
2559 vfs,
2559 vfs,
2560 lockname,
2560 lockname,
2561 wait,
2561 wait,
2562 releasefn,
2562 releasefn,
2563 acquirefn,
2563 acquirefn,
2564 desc,
2564 desc,
2565 inheritchecker=None,
2565 inheritchecker=None,
2566 parentenvvar=None,
2566 parentenvvar=None,
2567 ):
2567 ):
2568 parentlock = None
2568 parentlock = None
2569 # the contents of parentenvvar are used by the underlying lock to
2569 # the contents of parentenvvar are used by the underlying lock to
2570 # determine whether it can be inherited
2570 # determine whether it can be inherited
2571 if parentenvvar is not None:
2571 if parentenvvar is not None:
2572 parentlock = encoding.environ.get(parentenvvar)
2572 parentlock = encoding.environ.get(parentenvvar)
2573
2573
2574 timeout = 0
2574 timeout = 0
2575 warntimeout = 0
2575 warntimeout = 0
2576 if wait:
2576 if wait:
2577 timeout = self.ui.configint(b"ui", b"timeout")
2577 timeout = self.ui.configint(b"ui", b"timeout")
2578 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2578 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2579 # internal config: ui.signal-safe-lock
2579 # internal config: ui.signal-safe-lock
2580 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2580 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2581
2581
2582 l = lockmod.trylock(
2582 l = lockmod.trylock(
2583 self.ui,
2583 self.ui,
2584 vfs,
2584 vfs,
2585 lockname,
2585 lockname,
2586 timeout,
2586 timeout,
2587 warntimeout,
2587 warntimeout,
2588 releasefn=releasefn,
2588 releasefn=releasefn,
2589 acquirefn=acquirefn,
2589 acquirefn=acquirefn,
2590 desc=desc,
2590 desc=desc,
2591 inheritchecker=inheritchecker,
2591 inheritchecker=inheritchecker,
2592 parentlock=parentlock,
2592 parentlock=parentlock,
2593 signalsafe=signalsafe,
2593 signalsafe=signalsafe,
2594 )
2594 )
2595 return l
2595 return l
2596
2596
2597 def _afterlock(self, callback):
2597 def _afterlock(self, callback):
2598 """add a callback to be run when the repository is fully unlocked
2598 """add a callback to be run when the repository is fully unlocked
2599
2599
2600 The callback will be executed when the outermost lock is released
2600 The callback will be executed when the outermost lock is released
2601 (with wlock being higher level than 'lock')."""
2601 (with wlock being higher level than 'lock')."""
2602 for ref in (self._wlockref, self._lockref):
2602 for ref in (self._wlockref, self._lockref):
2603 l = ref and ref()
2603 l = ref and ref()
2604 if l and l.held:
2604 if l and l.held:
2605 l.postrelease.append(callback)
2605 l.postrelease.append(callback)
2606 break
2606 break
2607 else: # no lock has been found.
2607 else: # no lock has been found.
2608 callback(True)
2608 callback(True)
2609
2609
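# _afterlock() leans on Python's for/else: the else branch runs only when the
# loop finished without break, i.e. when no held lock was found, in which
# case the callback fires immediately. A standalone sketch of that control
# flow (the dict-based locks are hypothetical stand-ins):
def run_after_unlock(locks, callback):
    for lck in locks:
        if lck is not None and lck.get('held'):
            lck.setdefault('postrelease', []).append(callback)
            break
    else:  # no held lock found -> run the callback now
        callback(True)

calls = []
run_after_unlock([None, None], calls.append)
assert calls == [True]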
2610 def lock(self, wait=True):
2610 def lock(self, wait=True):
2611 '''Lock the repository store (.hg/store) and return a weak reference
2611 '''Lock the repository store (.hg/store) and return a weak reference
2612 to the lock. Use this before modifying the store (e.g. committing or
2612 to the lock. Use this before modifying the store (e.g. committing or
2613 stripping). If you are opening a transaction, get a lock as well.
2613 stripping). If you are opening a transaction, get a lock as well.
2614
2614
2615 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2615 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2616 'wlock' first to avoid a dead-lock hazard.'''
2616 'wlock' first to avoid a dead-lock hazard.'''
2617 l = self._currentlock(self._lockref)
2617 l = self._currentlock(self._lockref)
2618 if l is not None:
2618 if l is not None:
2619 l.lock()
2619 l.lock()
2620 return l
2620 return l
2621
2621
2622 l = self._lock(
2622 l = self._lock(
2623 vfs=self.svfs,
2623 vfs=self.svfs,
2624 lockname=b"lock",
2624 lockname=b"lock",
2625 wait=wait,
2625 wait=wait,
2626 releasefn=None,
2626 releasefn=None,
2627 acquirefn=self.invalidate,
2627 acquirefn=self.invalidate,
2628 desc=_(b'repository %s') % self.origroot,
2628 desc=_(b'repository %s') % self.origroot,
2629 )
2629 )
2630 self._lockref = weakref.ref(l)
2630 self._lockref = weakref.ref(l)
2631 return l
2631 return l
2632
2632
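# lock() and wlock() cache the lock object behind a weak reference and simply
# re-enter it (l.lock()) when it is already held, so nested callers share one
# OS-level lock with a use count. A simplified sketch of that re-entrant
# behaviour (MiniLock/MiniRepo are illustrative, not hg classes):
import weakref

class MiniLock(object):
    held = 0
    def lock(self):
        self.held += 1
    def release(self):
        self.held -= 1

class MiniRepo(object):
    _lockref = None
    def lock(self):
        l = self._lockref() if self._lockref else None
        if l is not None and l.held:
            l.lock()  # re-enter the existing lock
            return l
        l = MiniLock()
        l.lock()
        self._lockref = weakref.ref(l)
        return l

repo = MiniRepo()
outer = repo.lock()
inner = repo.lock()
assert inner is outer and outer.held == 2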
2633 def _wlockchecktransaction(self):
2633 def _wlockchecktransaction(self):
2634 if self.currenttransaction() is not None:
2634 if self.currenttransaction() is not None:
2635 raise error.LockInheritanceContractViolation(
2635 raise error.LockInheritanceContractViolation(
2636 b'wlock cannot be inherited in the middle of a transaction'
2636 b'wlock cannot be inherited in the middle of a transaction'
2637 )
2637 )
2638
2638
2639 def wlock(self, wait=True):
2639 def wlock(self, wait=True):
2640 '''Lock the non-store parts of the repository (everything under
2640 '''Lock the non-store parts of the repository (everything under
2641 .hg except .hg/store) and return a weak reference to the lock.
2641 .hg except .hg/store) and return a weak reference to the lock.
2642
2642
2643 Use this before modifying files in .hg.
2643 Use this before modifying files in .hg.
2644
2644
2645 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2645 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2646 'wlock' first to avoid a dead-lock hazard.'''
2646 'wlock' first to avoid a dead-lock hazard.'''
2647 l = self._wlockref and self._wlockref()
2647 l = self._wlockref and self._wlockref()
2648 if l is not None and l.held:
2648 if l is not None and l.held:
2649 l.lock()
2649 l.lock()
2650 return l
2650 return l
2651
2651
2652 # We do not need to check for non-waiting lock acquisition. Such
2652 # We do not need to check for non-waiting lock acquisition. Such
2653 # acquisition would not cause dead-lock as they would just fail.
2653 # acquisition would not cause dead-lock as they would just fail.
2654 if wait and (
2654 if wait and (
2655 self.ui.configbool(b'devel', b'all-warnings')
2655 self.ui.configbool(b'devel', b'all-warnings')
2656 or self.ui.configbool(b'devel', b'check-locks')
2656 or self.ui.configbool(b'devel', b'check-locks')
2657 ):
2657 ):
2658 if self._currentlock(self._lockref) is not None:
2658 if self._currentlock(self._lockref) is not None:
2659 self.ui.develwarn(b'"wlock" acquired after "lock"')
2659 self.ui.develwarn(b'"wlock" acquired after "lock"')
2660
2660
2661 def unlock():
2661 def unlock():
2662 if self.dirstate.pendingparentchange():
2662 if self.dirstate.pendingparentchange():
2663 self.dirstate.invalidate()
2663 self.dirstate.invalidate()
2664 else:
2664 else:
2665 self.dirstate.write(None)
2665 self.dirstate.write(None)
2666
2666
2667 self._filecache[b'dirstate'].refresh()
2667 self._filecache[b'dirstate'].refresh()
2668
2668
2669 l = self._lock(
2669 l = self._lock(
2670 self.vfs,
2670 self.vfs,
2671 b"wlock",
2671 b"wlock",
2672 wait,
2672 wait,
2673 unlock,
2673 unlock,
2674 self.invalidatedirstate,
2674 self.invalidatedirstate,
2675 _(b'working directory of %s') % self.origroot,
2675 _(b'working directory of %s') % self.origroot,
2676 inheritchecker=self._wlockchecktransaction,
2676 inheritchecker=self._wlockchecktransaction,
2677 parentenvvar=b'HG_WLOCK_LOCKER',
2677 parentenvvar=b'HG_WLOCK_LOCKER',
2678 )
2678 )
2679 self._wlockref = weakref.ref(l)
2679 self._wlockref = weakref.ref(l)
2680 return l
2680 return l
2681
2681
2682 def _currentlock(self, lockref):
2682 def _currentlock(self, lockref):
2683 """Returns the lock if it's held, or None if it's not."""
2683 """Returns the lock if it's held, or None if it's not."""
2684 if lockref is None:
2684 if lockref is None:
2685 return None
2685 return None
2686 l = lockref()
2686 l = lockref()
2687 if l is None or not l.held:
2687 if l is None or not l.held:
2688 return None
2688 return None
2689 return l
2689 return l
2690
2690
2691 def currentwlock(self):
2691 def currentwlock(self):
2692 """Returns the wlock if it's held, or None if it's not."""
2692 """Returns the wlock if it's held, or None if it's not."""
2693 return self._currentlock(self._wlockref)
2693 return self._currentlock(self._wlockref)
2694
2694
2695 def _filecommit(
2695 def _filecommit(
2696 self,
2696 self,
2697 fctx,
2697 fctx,
2698 manifest1,
2698 manifest1,
2699 manifest2,
2699 manifest2,
2700 linkrev,
2700 linkrev,
2701 tr,
2701 tr,
2702 changelist,
2702 changelist,
2703 includecopymeta,
2703 includecopymeta,
2704 ):
2704 ):
2705 """
2705 """
2706 commit an individual file as part of a larger transaction
2706 commit an individual file as part of a larger transaction
2707 """
2707 """
2708
2708
2709 fname = fctx.path()
2709 fname = fctx.path()
2710 fparent1 = manifest1.get(fname, nullid)
2710 fparent1 = manifest1.get(fname, nullid)
2711 fparent2 = manifest2.get(fname, nullid)
2711 fparent2 = manifest2.get(fname, nullid)
2712 if isinstance(fctx, context.filectx):
2712 if isinstance(fctx, context.filectx):
2713 node = fctx.filenode()
2713 node = fctx.filenode()
2714 if node in [fparent1, fparent2]:
2714 if node in [fparent1, fparent2]:
2715 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2715 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2716 if (
2716 if (
2717 fparent1 != nullid
2717 fparent1 != nullid
2718 and manifest1.flags(fname) != fctx.flags()
2718 and manifest1.flags(fname) != fctx.flags()
2719 ) or (
2719 ) or (
2720 fparent2 != nullid
2720 fparent2 != nullid
2721 and manifest2.flags(fname) != fctx.flags()
2721 and manifest2.flags(fname) != fctx.flags()
2722 ):
2722 ):
2723 changelist.append(fname)
2723 changelist.append(fname)
2724 return node
2724 return node
2725
2725
2726 flog = self.file(fname)
2726 flog = self.file(fname)
2727 meta = {}
2727 meta = {}
2728 cfname = fctx.copysource()
2728 cfname = fctx.copysource()
2729 if cfname and cfname != fname:
2729 if cfname and cfname != fname:
2730 # Mark the new revision of this file as a copy of another
2730 # Mark the new revision of this file as a copy of another
2731 # file. This copy data will effectively act as a parent
2731 # file. This copy data will effectively act as a parent
2732 # of this new revision. If this is a merge, the first
2732 # of this new revision. If this is a merge, the first
2733 # parent will be the nullid (meaning "look up the copy data")
2733 # parent will be the nullid (meaning "look up the copy data")
2734 # and the second one will be the other parent. For example:
2734 # and the second one will be the other parent. For example:
2735 #
2735 #
2736 # 0 --- 1 --- 3 rev1 changes file foo
2736 # 0 --- 1 --- 3 rev1 changes file foo
2737 # \ / rev2 renames foo to bar and changes it
2737 # \ / rev2 renames foo to bar and changes it
2738 # \- 2 -/ rev3 should have bar with all changes and
2738 # \- 2 -/ rev3 should have bar with all changes and
2739 # should record that bar descends from
2739 # should record that bar descends from
2740 # bar in rev2 and foo in rev1
2740 # bar in rev2 and foo in rev1
2741 #
2741 #
2742 # this allows this merge to succeed:
2742 # this allows this merge to succeed:
2743 #
2743 #
2744 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2744 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2745 # \ / merging rev3 and rev4 should use bar@rev2
2745 # \ / merging rev3 and rev4 should use bar@rev2
2746 # \- 2 --- 4 as the merge base
2746 # \- 2 --- 4 as the merge base
2747 #
2747 #
2748
2748
2749 cnode = manifest1.get(cfname)
2750 newfparent = fparent2
2751
2752 if manifest2: # branch merge
2753 if fparent2 == nullid or cnode is None: # copied on remote side
2754 if cfname in manifest2:
2755 cnode = manifest2[cfname]
2756 newfparent = fparent1
2757
2758 # Here, we used to search backwards through history to try to find
2759 # where the file copy came from if the source of a copy was not in
2760 # the parent directory. However, this doesn't actually make sense to
2761 # do (what does a copy from something not in your working copy even
2762 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2763 # the user that copy information was dropped, so if they didn't
2764 # expect this outcome it can be fixed, but this is the correct
2765 # behavior in this circumstance.
2766
2767 if cnode:
2768 self.ui.debug(
2769 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2770 )
2771 if includecopymeta:
2772 meta[b"copy"] = cfname
2773 meta[b"copyrev"] = hex(cnode)
2774 fparent1, fparent2 = nullid, newfparent
2775 else:
2776 self.ui.warn(
2777 _(
2778 b"warning: can't find ancestor for '%s' "
2779 b"copied from '%s'!\n"
2780 )
2781 % (fname, cfname)
2782 )
2783
2784 elif fparent1 == nullid:
2785 fparent1, fparent2 = fparent2, nullid
2786 elif fparent2 != nullid:
2787 # is one parent an ancestor of the other?
2788 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2789 if fparent1 in fparentancestors:
2790 fparent1, fparent2 = fparent2, nullid
2791 elif fparent2 in fparentancestors:
2792 fparent2 = nullid
2793
2794 # is the file changed?
2795 text = fctx.data()
2796 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2797 changelist.append(fname)
2798 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2799 # are just the flags changed during merge?
2800 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2801 changelist.append(fname)
2802
2803 return fparent1
2804
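# --- editor's illustrative sketch (not part of localrepo.py) ----------------
# A hedged, pure-Python restatement of the file-parent reduction performed
# above: when one filelog parent is already an ancestor of the other, it is
# redundant and is dropped (replaced by the null node). The helper name, the
# callable argument and the toy null value are all hypothetical.
def _reduce_file_parents(fp1, fp2, commonancestorsheads, null=b"\0" * 20):
    ancestors = commonancestorsheads(fp1, fp2)
    if fp1 in ancestors:
        return fp2, null   # fp2 already descends from fp1
    if fp2 in ancestors:
        return fp1, null   # fp1 already descends from fp2
    return fp1, fp2        # genuinely divergent: keep both parents
# -----------------------------------------------------------------------------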
2805 def checkcommitpatterns(self, wctx, match, status, fail):
2806 """check for commit arguments that aren't committable"""
2807 if match.isexact() or match.prefix():
2808 matched = set(status.modified + status.added + status.removed)
2809
2810 for f in match.files():
2811 f = self.dirstate.normalize(f)
2812 if f == b'.' or f in matched or f in wctx.substate:
2813 continue
2814 if f in status.deleted:
2815 fail(f, _(b'file not found!'))
2816 # Is it a directory that exists or used to exist?
2817 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2818 d = f + b'/'
2819 for mf in matched:
2820 if mf.startswith(d):
2821 break
2822 else:
2823 fail(f, _(b"no match under directory!"))
2824 elif f not in self.dirstate:
2825 fail(f, _(b"file not tracked!"))
2826
2827 @unfilteredmethod
2827 @unfilteredmethod
2828 def commit(
2828 def commit(
2829 self,
2829 self,
2830 text=b"",
2830 text=b"",
2831 user=None,
2831 user=None,
2832 date=None,
2832 date=None,
2833 match=None,
2833 match=None,
2834 force=False,
2834 force=False,
2835 editor=None,
2835 editor=None,
2836 extra=None,
2836 extra=None,
2837 ):
2837 ):
2838 """Add a new revision to current repository.
2838 """Add a new revision to current repository.
2839
2839
2840 Revision information is gathered from the working directory,
2840 Revision information is gathered from the working directory,
2841 match can be used to filter the committed files. If editor is
2841 match can be used to filter the committed files. If editor is
2842 supplied, it is called to get a commit message.
2842 supplied, it is called to get a commit message.
2843 """
2843 """
2844 if extra is None:
2844 if extra is None:
2845 extra = {}
2845 extra = {}
2846
2846
2847 def fail(f, msg):
2847 def fail(f, msg):
2848 raise error.Abort(b'%s: %s' % (f, msg))
2848 raise error.Abort(b'%s: %s' % (f, msg))
2849
2849
2850 if not match:
2850 if not match:
2851 match = matchmod.always()
2851 match = matchmod.always()
2852
2852
2853 if not force:
2853 if not force:
2854 match.bad = fail
2854 match.bad = fail
2855
2855
2856 # lock() for recent changelog (see issue4368)
2856 # lock() for recent changelog (see issue4368)
2857 with self.wlock(), self.lock():
2857 with self.wlock(), self.lock():
2858 wctx = self[None]
2858 wctx = self[None]
2859 merge = len(wctx.parents()) > 1
2859 merge = len(wctx.parents()) > 1
2860
2860
2861 if not force and merge and not match.always():
2861 if not force and merge and not match.always():
2862 raise error.Abort(
2862 raise error.Abort(
2863 _(
2863 _(
2864 b'cannot partially commit a merge '
2864 b'cannot partially commit a merge '
2865 b'(do not specify files or patterns)'
2865 b'(do not specify files or patterns)'
2866 )
2866 )
2867 )
2867 )
2868
2868
2869 status = self.status(match=match, clean=force)
2869 status = self.status(match=match, clean=force)
2870 if force:
2870 if force:
2871 status.modified.extend(
2871 status.modified.extend(
2872 status.clean
2872 status.clean
2873 ) # mq may commit clean files
2873 ) # mq may commit clean files
2874
2874
2875 # check subrepos
2875 # check subrepos
2876 subs, commitsubs, newstate = subrepoutil.precommit(
2876 subs, commitsubs, newstate = subrepoutil.precommit(
2877 self.ui, wctx, status, match, force=force
2877 self.ui, wctx, status, match, force=force
2878 )
2878 )
2879
2879
2880 # make sure all explicit patterns are matched
2880 # make sure all explicit patterns are matched
2881 if not force:
2881 if not force:
2882 self.checkcommitpatterns(wctx, match, status, fail)
2882 self.checkcommitpatterns(wctx, match, status, fail)
2883
2883
2884 cctx = context.workingcommitctx(
2884 cctx = context.workingcommitctx(
2885 self, status, text, user, date, extra
2885 self, status, text, user, date, extra
2886 )
2886 )
2887
2887
2888 # internal config: ui.allowemptycommit
2888 # internal config: ui.allowemptycommit
2889 allowemptycommit = (
2889 allowemptycommit = (
2890 wctx.branch() != wctx.p1().branch()
2890 wctx.branch() != wctx.p1().branch()
2891 or extra.get(b'close')
2891 or extra.get(b'close')
2892 or merge
2892 or merge
2893 or cctx.files()
2893 or cctx.files()
2894 or self.ui.configbool(b'ui', b'allowemptycommit')
2894 or self.ui.configbool(b'ui', b'allowemptycommit')
2895 )
2895 )
2896 if not allowemptycommit:
2896 if not allowemptycommit:
2897 return None
2897 return None
2898
2898
2899 if merge and cctx.deleted():
2899 if merge and cctx.deleted():
2900 raise error.Abort(_(b"cannot commit merge with missing files"))
2900 raise error.Abort(_(b"cannot commit merge with missing files"))
2901
2901
2902 ms = mergemod.mergestate.read(self)
2902 ms = mergemod.mergestate.read(self)
2903 mergeutil.checkunresolved(ms)
2903 mergeutil.checkunresolved(ms)
2904
2904
2905 if editor:
2905 if editor:
2906 cctx._text = editor(self, cctx, subs)
2906 cctx._text = editor(self, cctx, subs)
2907 edited = text != cctx._text
2907 edited = text != cctx._text
2908
2908
2909 # Save commit message in case this transaction gets rolled back
2909 # Save commit message in case this transaction gets rolled back
2910 # (e.g. by a pretxncommit hook). Leave the content alone on
2910 # (e.g. by a pretxncommit hook). Leave the content alone on
2911 # the assumption that the user will use the same editor again.
2911 # the assumption that the user will use the same editor again.
2912 msgfn = self.savecommitmessage(cctx._text)
2912 msgfn = self.savecommitmessage(cctx._text)
2913
2913
2914 # commit subs and write new state
2914 # commit subs and write new state
2915 if subs:
2915 if subs:
2916 uipathfn = scmutil.getuipathfn(self)
2916 uipathfn = scmutil.getuipathfn(self)
2917 for s in sorted(commitsubs):
2917 for s in sorted(commitsubs):
2918 sub = wctx.sub(s)
2918 sub = wctx.sub(s)
2919 self.ui.status(
2919 self.ui.status(
2920 _(b'committing subrepository %s\n')
2920 _(b'committing subrepository %s\n')
2921 % uipathfn(subrepoutil.subrelpath(sub))
2921 % uipathfn(subrepoutil.subrelpath(sub))
2922 )
2922 )
2923 sr = sub.commit(cctx._text, user, date)
2923 sr = sub.commit(cctx._text, user, date)
2924 newstate[s] = (newstate[s][0], sr)
2924 newstate[s] = (newstate[s][0], sr)
2925 subrepoutil.writestate(self, newstate)
2925 subrepoutil.writestate(self, newstate)
2926
2926
2927 p1, p2 = self.dirstate.parents()
2927 p1, p2 = self.dirstate.parents()
2928 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2928 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2929 try:
2929 try:
2930 self.hook(
2930 self.hook(
2931 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2931 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2932 )
2932 )
2933 with self.transaction(b'commit'):
2933 with self.transaction(b'commit'):
2934 ret = self.commitctx(cctx, True)
2934 ret = self.commitctx(cctx, True)
2935 # update bookmarks, dirstate and mergestate
2935 # update bookmarks, dirstate and mergestate
2936 bookmarks.update(self, [p1, p2], ret)
2936 bookmarks.update(self, [p1, p2], ret)
2937 cctx.markcommitted(ret)
2937 cctx.markcommitted(ret)
2938 ms.reset()
2938 ms.reset()
2939 except: # re-raises
2939 except: # re-raises
2940 if edited:
2940 if edited:
2941 self.ui.write(
2941 self.ui.write(
2942 _(b'note: commit message saved in %s\n') % msgfn
2942 _(b'note: commit message saved in %s\n') % msgfn
2943 )
2943 )
2944 raise
2944 raise
2945
2945
2946 def commithook(unused_success):
2946 def commithook(unused_success):
2947 # hack for commands that use a temporary commit (e.g. histedit):
2948 # the temporary commit may already have been stripped when the hook runs
2949 if self.changelog.hasnode(ret):
2949 if self.changelog.hasnode(ret):
2950 self.hook(
2950 self.hook(
2951 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2951 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2952 )
2952 )
2953
2953
2954 self._afterlock(commithook)
2954 self._afterlock(commithook)
2955 return ret
2955 return ret
2956
2956
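# --- editor's illustrative sketch (not part of localrepo.py) ----------------
# A hedged example of driving commit() above from the Python API, assuming a
# repository already exists at a hypothetical path and has pending changes in
# its working directory. The helper name and the path are illustrative only.
from mercurial import hg, ui as uimod

def example_commit(path=b'/path/to/repo'):
    repo = hg.repository(uimod.ui.load(), path)
    # commit every modified file in the working directory; returns the new
    # changeset node, or None when there was nothing to commit
    return repo.commit(
        text=b'example commit message',
        user=b'Example User <user@example.com>',
    )
# -----------------------------------------------------------------------------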
2957 @unfilteredmethod
2957 @unfilteredmethod
2958 def commitctx(self, ctx, error=False, origctx=None):
2958 def commitctx(self, ctx, error=False, origctx=None):
2959 """Add a new revision to current repository.
2959 """Add a new revision to current repository.
2960 Revision information is passed via the context argument.
2960 Revision information is passed via the context argument.
2961
2961
2962 ctx.files() should list all files involved in this commit, i.e.
2962 ctx.files() should list all files involved in this commit, i.e.
2963 modified/added/removed files. On merge, it may be wider than the
2963 modified/added/removed files. On merge, it may be wider than the
2964 ctx.files() to be committed, since any file nodes derived directly
2964 ctx.files() to be committed, since any file nodes derived directly
2965 from p1 or p2 are excluded from the committed ctx.files().
2965 from p1 or p2 are excluded from the committed ctx.files().
2966
2966
2967 origctx is for convert to work around the problem that bug
2967 origctx is for convert to work around the problem that bug
2968 fixes to the files list in changesets change hashes. For
2968 fixes to the files list in changesets change hashes. For
2969 convert to be the identity, it can pass an origctx and this
2969 convert to be the identity, it can pass an origctx and this
2970 function will use the same files list when it makes sense to
2970 function will use the same files list when it makes sense to
2971 do so.
2971 do so.
2972 """
2972 """
2973
2973
2974 p1, p2 = ctx.p1(), ctx.p2()
2974 p1, p2 = ctx.p1(), ctx.p2()
2975 user = ctx.user()
2975 user = ctx.user()
2976
2976
2977 if self.filecopiesmode == b'changeset-sidedata':
2977 if self.filecopiesmode == b'changeset-sidedata':
2978 writechangesetcopy = True
2978 writechangesetcopy = True
2979 writefilecopymeta = True
2979 writefilecopymeta = True
2980 writecopiesto = None
2980 writecopiesto = None
2981 else:
2981 else:
2982 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
2982 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
2983 writefilecopymeta = writecopiesto != b'changeset-only'
2983 writefilecopymeta = writecopiesto != b'changeset-only'
2984 writechangesetcopy = writecopiesto in (
2984 writechangesetcopy = writecopiesto in (
2985 b'changeset-only',
2985 b'changeset-only',
2986 b'compatibility',
2986 b'compatibility',
2987 )
2987 )
2988 p1copies, p2copies = None, None
2988 p1copies, p2copies = None, None
2989 if writechangesetcopy:
2989 if writechangesetcopy:
2990 p1copies = ctx.p1copies()
2990 p1copies = ctx.p1copies()
2991 p2copies = ctx.p2copies()
2991 p2copies = ctx.p2copies()
2992 filesadded, filesremoved = None, None
2992 filesadded, filesremoved = None, None
2993 with self.lock(), self.transaction(b"commit") as tr:
2993 with self.lock(), self.transaction(b"commit") as tr:
2994 trp = weakref.proxy(tr)
2994 trp = weakref.proxy(tr)
2995
2995
2996 if ctx.manifestnode():
2996 if ctx.manifestnode():
2997 # reuse an existing manifest revision
2997 # reuse an existing manifest revision
2998 self.ui.debug(b'reusing known manifest\n')
2998 self.ui.debug(b'reusing known manifest\n')
2999 mn = ctx.manifestnode()
2999 mn = ctx.manifestnode()
3000 files = ctx.files()
3000 files = ctx.files()
3001 if writechangesetcopy:
3001 if writechangesetcopy:
3002 filesadded = ctx.filesadded()
3002 filesadded = ctx.filesadded()
3003 filesremoved = ctx.filesremoved()
3003 filesremoved = ctx.filesremoved()
3004 elif ctx.files():
3004 elif ctx.files():
3005 m1ctx = p1.manifestctx()
3005 m1ctx = p1.manifestctx()
3006 m2ctx = p2.manifestctx()
3006 m2ctx = p2.manifestctx()
3007 mctx = m1ctx.copy()
3007 mctx = m1ctx.copy()
3008
3008
3009 m = mctx.read()
3009 m = mctx.read()
3010 m1 = m1ctx.read()
3010 m1 = m1ctx.read()
3011 m2 = m2ctx.read()
3011 m2 = m2ctx.read()
3012
3012
3013 # check in files
3013 # check in files
3014 added = []
3014 added = []
3015 changed = []
3015 changed = []
3016 removed = list(ctx.removed())
3016 removed = list(ctx.removed())
3017 linkrev = len(self)
3017 linkrev = len(self)
3018 self.ui.note(_(b"committing files:\n"))
3018 self.ui.note(_(b"committing files:\n"))
3019 uipathfn = scmutil.getuipathfn(self)
3019 uipathfn = scmutil.getuipathfn(self)
3020 for f in sorted(ctx.modified() + ctx.added()):
3020 for f in sorted(ctx.modified() + ctx.added()):
3021 self.ui.note(uipathfn(f) + b"\n")
3021 self.ui.note(uipathfn(f) + b"\n")
3022 try:
3022 try:
3023 fctx = ctx[f]
3023 fctx = ctx[f]
3024 if fctx is None:
3024 if fctx is None:
3025 removed.append(f)
3025 removed.append(f)
3026 else:
3026 else:
3027 added.append(f)
3027 added.append(f)
3028 m[f] = self._filecommit(
3028 m[f] = self._filecommit(
3029 fctx,
3029 fctx,
3030 m1,
3030 m1,
3031 m2,
3031 m2,
3032 linkrev,
3032 linkrev,
3033 trp,
3033 trp,
3034 changed,
3034 changed,
3035 writefilecopymeta,
3035 writefilecopymeta,
3036 )
3036 )
3037 m.setflag(f, fctx.flags())
3037 m.setflag(f, fctx.flags())
3038 except OSError:
3038 except OSError:
3039 self.ui.warn(
3039 self.ui.warn(
3040 _(b"trouble committing %s!\n") % uipathfn(f)
3040 _(b"trouble committing %s!\n") % uipathfn(f)
3041 )
3041 )
3042 raise
3042 raise
3043 except IOError as inst:
3043 except IOError as inst:
3044 errcode = getattr(inst, 'errno', errno.ENOENT)
3044 errcode = getattr(inst, 'errno', errno.ENOENT)
3045 if error or errcode and errcode != errno.ENOENT:
3045 if error or errcode and errcode != errno.ENOENT:
3046 self.ui.warn(
3046 self.ui.warn(
3047 _(b"trouble committing %s!\n") % uipathfn(f)
3047 _(b"trouble committing %s!\n") % uipathfn(f)
3048 )
3048 )
3049 raise
3049 raise
3050
3050
3051 # update manifest
3051 # update manifest
3052 removed = [f for f in removed if f in m1 or f in m2]
3052 removed = [f for f in removed if f in m1 or f in m2]
3053 drop = sorted([f for f in removed if f in m])
3053 drop = sorted([f for f in removed if f in m])
3054 for f in drop:
3054 for f in drop:
3055 del m[f]
3055 del m[f]
3056 if p2.rev() != nullrev:
3056 if p2.rev() != nullrev:
3057
3057
3058 @util.cachefunc
3058 @util.cachefunc
3059 def mas():
3059 def mas():
3060 p1n = p1.node()
3060 p1n = p1.node()
3061 p2n = p2.node()
3061 p2n = p2.node()
3062 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3062 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3063 if not cahs:
3063 if not cahs:
3064 cahs = [nullrev]
3064 cahs = [nullrev]
3065 return [self[r].manifest() for r in cahs]
3065 return [self[r].manifest() for r in cahs]
3066
3066
3067 def deletionfromparent(f):
3067 def deletionfromparent(f):
3068 # When a file is removed relative to p1 in a merge, this
3068 # When a file is removed relative to p1 in a merge, this
3069 # function determines whether the absence is due to a
3069 # function determines whether the absence is due to a
3070 # deletion from a parent, or whether the merge commit
3070 # deletion from a parent, or whether the merge commit
3071 # itself deletes the file. We decide this by doing a
3071 # itself deletes the file. We decide this by doing a
3072 # simplified three way merge of the manifest entry for
3072 # simplified three way merge of the manifest entry for
3073 # the file. There are two ways we decide the merge
3073 # the file. There are two ways we decide the merge
3074 # itself didn't delete a file:
3074 # itself didn't delete a file:
3075 # - neither parent (nor the merge) contain the file
3075 # - neither parent (nor the merge) contain the file
3076 # - exactly one parent contains the file, and that
3076 # - exactly one parent contains the file, and that
3077 # parent has the same filelog entry as the merge
3077 # parent has the same filelog entry as the merge
3078 # ancestor (or all of them if there are two). In other
3079 # words, that parent left the file unchanged while the
3079 # words, that parent left the file unchanged while the
3080 # other one deleted it.
3080 # other one deleted it.
3081 # One way to think about this is that deleting a file is
3081 # One way to think about this is that deleting a file is
3082 # similar to emptying it, so the list of changed files
3082 # similar to emptying it, so the list of changed files
3083 # should be similar either way. The computation
3083 # should be similar either way. The computation
3084 # described above is not done directly in _filecommit
3084 # described above is not done directly in _filecommit
3085 # when creating the list of changed files, however
3085 # when creating the list of changed files, however
3086 # it does something very similar by comparing filelog
3086 # it does something very similar by comparing filelog
3087 # nodes.
3087 # nodes.
3088 if f in m1:
3088 if f in m1:
3089 return f not in m2 and all(
3089 return f not in m2 and all(
3090 f in ma and ma.find(f) == m1.find(f)
3090 f in ma and ma.find(f) == m1.find(f)
3091 for ma in mas()
3091 for ma in mas()
3092 )
3092 )
3093 elif f in m2:
3093 elif f in m2:
3094 return all(
3094 return all(
3095 f in ma and ma.find(f) == m2.find(f)
3095 f in ma and ma.find(f) == m2.find(f)
3096 for ma in mas()
3096 for ma in mas()
3097 )
3097 )
3098 else:
3098 else:
3099 return True
3099 return True
3100
3100
3101 removed = [f for f in removed if not deletionfromparent(f)]
3101 removed = [f for f in removed if not deletionfromparent(f)]
3102
3102
3103 files = changed + removed
3103 files = changed + removed
3104 md = None
3104 md = None
3105 if not files:
3105 if not files:
3106 # if no "files" actually changed in terms of the changelog,
3106 # if no "files" actually changed in terms of the changelog,
3107 # try hard to detect unmodified manifest entry so that the
3107 # try hard to detect unmodified manifest entry so that the
3108 # exact same commit can be reproduced later on convert.
3108 # exact same commit can be reproduced later on convert.
3109 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3109 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3110 if not files and md:
3110 if not files and md:
3111 self.ui.debug(
3111 self.ui.debug(
3112 b'not reusing manifest (no file change in '
3112 b'not reusing manifest (no file change in '
3113 b'changelog, but manifest differs)\n'
3113 b'changelog, but manifest differs)\n'
3114 )
3114 )
3115 if files or md:
3115 if files or md:
3116 self.ui.note(_(b"committing manifest\n"))
3116 self.ui.note(_(b"committing manifest\n"))
3117 # we're using narrowmatch here since it's already applied at
3117 # we're using narrowmatch here since it's already applied at
3118 # other stages (such as dirstate.walk), so we're already
3118 # other stages (such as dirstate.walk), so we're already
3119 # ignoring things outside of narrowspec in most cases. The
3119 # ignoring things outside of narrowspec in most cases. The
3120 # one case where we might have files outside the narrowspec
3120 # one case where we might have files outside the narrowspec
3121 # at this point is merges, and we already error out in the
3121 # at this point is merges, and we already error out in the
3122 # case where the merge has files outside of the narrowspec,
3122 # case where the merge has files outside of the narrowspec,
3123 # so this is safe.
3123 # so this is safe.
3124 mn = mctx.write(
3124 mn = mctx.write(
3125 trp,
3125 trp,
3126 linkrev,
3126 linkrev,
3127 p1.manifestnode(),
3127 p1.manifestnode(),
3128 p2.manifestnode(),
3128 p2.manifestnode(),
3129 added,
3129 added,
3130 drop,
3130 drop,
3131 match=self.narrowmatch(),
3131 match=self.narrowmatch(),
3132 )
3132 )
3133
3133
3134 if writechangesetcopy:
3134 if writechangesetcopy:
3135 filesadded = [
3135 filesadded = [
3136 f for f in changed if not (f in m1 or f in m2)
3136 f for f in changed if not (f in m1 or f in m2)
3137 ]
3137 ]
3138 filesremoved = removed
3138 filesremoved = removed
3139 else:
3139 else:
3140 self.ui.debug(
3140 self.ui.debug(
3141 b'reusing manifest from p1 (listed files '
3141 b'reusing manifest from p1 (listed files '
3142 b'actually unchanged)\n'
3142 b'actually unchanged)\n'
3143 )
3143 )
3144 mn = p1.manifestnode()
3144 mn = p1.manifestnode()
3145 else:
3145 else:
3146 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3146 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3147 mn = p1.manifestnode()
3147 mn = p1.manifestnode()
3148 files = []
3148 files = []
3149
3149
3150 if writecopiesto == b'changeset-only':
3150 if writecopiesto == b'changeset-only':
3151 # If writing only to changeset extras, use None to indicate that
3151 # If writing only to changeset extras, use None to indicate that
3152 # no entry should be written. If writing to both, write an empty
3152 # no entry should be written. If writing to both, write an empty
3153 # entry to prevent the reader from falling back to reading
3153 # entry to prevent the reader from falling back to reading
3154 # filelogs.
3154 # filelogs.
3155 p1copies = p1copies or None
3155 p1copies = p1copies or None
3156 p2copies = p2copies or None
3156 p2copies = p2copies or None
3157 filesadded = filesadded or None
3157 filesadded = filesadded or None
3158 filesremoved = filesremoved or None
3158 filesremoved = filesremoved or None
3159
3159
3160 if origctx and origctx.manifestnode() == mn:
3160 if origctx and origctx.manifestnode() == mn:
3161 files = origctx.files()
3161 files = origctx.files()
3162
3162
3163 # update changelog
3163 # update changelog
3164 self.ui.note(_(b"committing changelog\n"))
3164 self.ui.note(_(b"committing changelog\n"))
3165 self.changelog.delayupdate(tr)
3165 self.changelog.delayupdate(tr)
3166 n = self.changelog.add(
3166 n = self.changelog.add(
3167 mn,
3167 mn,
3168 files,
3168 files,
3169 ctx.description(),
3169 ctx.description(),
3170 trp,
3170 trp,
3171 p1.node(),
3171 p1.node(),
3172 p2.node(),
3172 p2.node(),
3173 user,
3173 user,
3174 ctx.date(),
3174 ctx.date(),
3175 ctx.extra().copy(),
3175 ctx.extra().copy(),
3176 p1copies,
3176 p1copies,
3177 p2copies,
3177 p2copies,
3178 filesadded,
3178 filesadded,
3179 filesremoved,
3179 filesremoved,
3180 )
3180 )
3181 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3181 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3182 self.hook(
3182 self.hook(
3183 b'pretxncommit',
3183 b'pretxncommit',
3184 throw=True,
3184 throw=True,
3185 node=hex(n),
3185 node=hex(n),
3186 parent1=xp1,
3186 parent1=xp1,
3187 parent2=xp2,
3187 parent2=xp2,
3188 )
3188 )
3189 # set the new commit in its proper phase
3190 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3190 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3191 if targetphase:
3191 if targetphase:
3192 # retracting the phase boundary does not alter the parent changesets:
3193 # if a parent has a higher phase, the resulting phase will
3194 # be compliant anyway
3195 #
3195 #
3196 # if minimal phase was 0 we don't need to retract anything
3196 # if minimal phase was 0 we don't need to retract anything
3197 phases.registernew(self, tr, targetphase, [n])
3197 phases.registernew(self, tr, targetphase, [n])
3198 return n
3198 return n
3199
3199
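# --- editor's illustrative sketch (not part of localrepo.py) ----------------
# A hedged, pure-Python restatement of deletionfromparent() above, using
# plain dicts that map file name -> file node instead of manifest objects
# (the real code compares manifest.find() results). All names here are
# hypothetical; "mas" stands for the merge-ancestor manifests.
def deletion_from_parent(f, m1, m2, mas):
    if f in m1:
        # present only in p1 and identical to every merge ancestor: the
        # other parent deleted it, the merge commit itself did not
        return f not in m2 and all(f in ma and ma[f] == m1[f] for ma in mas)
    elif f in m2:
        return all(f in ma and ma[f] == m2[f] for ma in mas)
    else:
        # in neither parent: nothing for this merge commit to have deleted
        return True
# -----------------------------------------------------------------------------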
3200 @unfilteredmethod
3200 @unfilteredmethod
3201 def destroying(self):
3201 def destroying(self):
3202 '''Inform the repository that nodes are about to be destroyed.
3202 '''Inform the repository that nodes are about to be destroyed.
3203 Intended for use by strip and rollback, so there's a common
3203 Intended for use by strip and rollback, so there's a common
3204 place for anything that has to be done before destroying history.
3204 place for anything that has to be done before destroying history.
3205
3205
3206 This is mostly useful for saving state that is in memory and waiting
3206 This is mostly useful for saving state that is in memory and waiting
3207 to be flushed when the current lock is released. Because a call to
3207 to be flushed when the current lock is released. Because a call to
3208 destroyed is imminent, the repo will be invalidated causing those
3208 destroyed is imminent, the repo will be invalidated causing those
3209 changes to stay in memory (waiting for the next unlock), or vanish
3209 changes to stay in memory (waiting for the next unlock), or vanish
3210 completely.
3210 completely.
3211 '''
3211 '''
3212 # When using the same lock to commit and strip, the phasecache is left
3212 # When using the same lock to commit and strip, the phasecache is left
3213 # dirty after committing. Then when we strip, the repo is invalidated,
3213 # dirty after committing. Then when we strip, the repo is invalidated,
3214 # causing those changes to disappear.
3214 # causing those changes to disappear.
3215 if '_phasecache' in vars(self):
3215 if '_phasecache' in vars(self):
3216 self._phasecache.write()
3216 self._phasecache.write()
3217
3217
3218 @unfilteredmethod
3218 @unfilteredmethod
3219 def destroyed(self):
3219 def destroyed(self):
3220 '''Inform the repository that nodes have been destroyed.
3220 '''Inform the repository that nodes have been destroyed.
3221 Intended for use by strip and rollback, so there's a common
3221 Intended for use by strip and rollback, so there's a common
3222 place for anything that has to be done after destroying history.
3222 place for anything that has to be done after destroying history.
3223 '''
3223 '''
3224 # When one tries to:
3224 # When one tries to:
3225 # 1) destroy nodes thus calling this method (e.g. strip)
3225 # 1) destroy nodes thus calling this method (e.g. strip)
3226 # 2) use phasecache somewhere (e.g. commit)
3226 # 2) use phasecache somewhere (e.g. commit)
3227 #
3227 #
3228 # then 2) will fail because the phasecache contains nodes that were
3228 # then 2) will fail because the phasecache contains nodes that were
3229 # removed. We can either remove phasecache from the filecache,
3229 # removed. We can either remove phasecache from the filecache,
3230 # causing it to reload next time it is accessed, or simply filter
3230 # causing it to reload next time it is accessed, or simply filter
3231 # the removed nodes now and write the updated cache.
3231 # the removed nodes now and write the updated cache.
3232 self._phasecache.filterunknown(self)
3232 self._phasecache.filterunknown(self)
3233 self._phasecache.write()
3233 self._phasecache.write()
3234
3234
3235 # refresh all repository caches
3235 # refresh all repository caches
3236 self.updatecaches()
3236 self.updatecaches()
3237
3237
3238 # Ensure the persistent tag cache is updated. Doing it now
3238 # Ensure the persistent tag cache is updated. Doing it now
3239 # means that the tag cache only has to worry about destroyed
3239 # means that the tag cache only has to worry about destroyed
3240 # heads immediately after a strip/rollback. That in turn
3240 # heads immediately after a strip/rollback. That in turn
3241 # guarantees that "cachetip == currenttip" (comparing both rev
3241 # guarantees that "cachetip == currenttip" (comparing both rev
3242 # and node) always means no nodes have been added or destroyed.
3242 # and node) always means no nodes have been added or destroyed.
3243
3243
3244 # XXX this is suboptimal when qrefresh'ing: we strip the current
3244 # XXX this is suboptimal when qrefresh'ing: we strip the current
3245 # head, refresh the tag cache, then immediately add a new head.
3245 # head, refresh the tag cache, then immediately add a new head.
3246 # But I think doing it this way is necessary for the "instant
3246 # But I think doing it this way is necessary for the "instant
3247 # tag cache retrieval" case to work.
3247 # tag cache retrieval" case to work.
3248 self.invalidate()
3248 self.invalidate()
3249
3249
3250 def status(
3250 def status(
3251 self,
3251 self,
3252 node1=b'.',
3252 node1=b'.',
3253 node2=None,
3253 node2=None,
3254 match=None,
3254 match=None,
3255 ignored=False,
3255 ignored=False,
3256 clean=False,
3256 clean=False,
3257 unknown=False,
3257 unknown=False,
3258 listsubrepos=False,
3258 listsubrepos=False,
3259 ):
3259 ):
3260 '''a convenience method that calls node1.status(node2)'''
3260 '''a convenience method that calls node1.status(node2)'''
3261 return self[node1].status(
3261 return self[node1].status(
3262 node2, match, ignored, clean, unknown, listsubrepos
3262 node2, match, ignored, clean, unknown, listsubrepos
3263 )
3263 )
3264
3264
3265 def addpostdsstatus(self, ps):
3265 def addpostdsstatus(self, ps):
3266 """Add a callback to run within the wlock, at the point at which status
3266 """Add a callback to run within the wlock, at the point at which status
3267 fixups happen.
3267 fixups happen.
3268
3268
3269 On status completion, callback(wctx, status) will be called with the
3269 On status completion, callback(wctx, status) will be called with the
3270 wlock held, unless the dirstate has changed from underneath or the wlock
3270 wlock held, unless the dirstate has changed from underneath or the wlock
3271 couldn't be grabbed.
3271 couldn't be grabbed.
3272
3272
3273 Callbacks should not capture and use a cached copy of the dirstate --
3273 Callbacks should not capture and use a cached copy of the dirstate --
3274 it might change in the meanwhile. Instead, they should access the
3274 it might change in the meanwhile. Instead, they should access the
3275 dirstate via wctx.repo().dirstate.
3275 dirstate via wctx.repo().dirstate.
3276
3276
3277 This list is emptied out after each status run -- extensions should
3277 This list is emptied out after each status run -- extensions should
3278 make sure it adds to this list each time dirstate.status is called.
3278 make sure it adds to this list each time dirstate.status is called.
3279 Extensions should also make sure they don't call this for statuses
3279 Extensions should also make sure they don't call this for statuses
3280 that don't involve the dirstate.
3280 that don't involve the dirstate.
3281 """
3281 """
3282
3282
3283 # The list is located here for uniqueness reasons -- it is actually
3283 # The list is located here for uniqueness reasons -- it is actually
3284 # managed by the workingctx, but that isn't unique per-repo.
3284 # managed by the workingctx, but that isn't unique per-repo.
3285 self._postdsstatus.append(ps)
3285 self._postdsstatus.append(ps)
3286
3286
3287 def postdsstatus(self):
3287 def postdsstatus(self):
3288 """Used by workingctx to get the list of post-dirstate-status hooks."""
3288 """Used by workingctx to get the list of post-dirstate-status hooks."""
3289 return self._postdsstatus
3289 return self._postdsstatus
3290
3290
3291 def clearpostdsstatus(self):
3291 def clearpostdsstatus(self):
3292 """Used by workingctx to clear post-dirstate-status hooks."""
3292 """Used by workingctx to clear post-dirstate-status hooks."""
3293 del self._postdsstatus[:]
3293 del self._postdsstatus[:]
3294
3294
3295 def heads(self, start=None):
3295 def heads(self, start=None):
3296 if start is None:
3296 if start is None:
3297 cl = self.changelog
3297 cl = self.changelog
3298 headrevs = reversed(cl.headrevs())
3298 headrevs = reversed(cl.headrevs())
3299 return [cl.node(rev) for rev in headrevs]
3299 return [cl.node(rev) for rev in headrevs]
3300
3300
3301 heads = self.changelog.heads(start)
3301 heads = self.changelog.heads(start)
3302 # sort the output in rev descending order
3302 # sort the output in rev descending order
3303 return sorted(heads, key=self.changelog.rev, reverse=True)
3303 return sorted(heads, key=self.changelog.rev, reverse=True)
3304
3304
3305 def branchheads(self, branch=None, start=None, closed=False):
3305 def branchheads(self, branch=None, start=None, closed=False):
3306 '''return a (possibly filtered) list of heads for the given branch
3306 '''return a (possibly filtered) list of heads for the given branch
3307
3307
3308 Heads are returned in topological order, from newest to oldest.
3308 Heads are returned in topological order, from newest to oldest.
3309 If branch is None, use the dirstate branch.
3309 If branch is None, use the dirstate branch.
3310 If start is not None, return only heads reachable from start.
3310 If start is not None, return only heads reachable from start.
3311 If closed is True, return heads that are marked as closed as well.
3311 If closed is True, return heads that are marked as closed as well.
3312 '''
3312 '''
3313 if branch is None:
3313 if branch is None:
3314 branch = self[None].branch()
3314 branch = self[None].branch()
3315 branches = self.branchmap()
3315 branches = self.branchmap()
3316 if not branches.hasbranch(branch):
3316 if not branches.hasbranch(branch):
3317 return []
3317 return []
3318 # the cache returns heads ordered lowest to highest
3318 # the cache returns heads ordered lowest to highest
3319 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3319 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3320 if start is not None:
3320 if start is not None:
3321 # filter out the heads that cannot be reached from startrev
3321 # filter out the heads that cannot be reached from startrev
3322 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3322 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3323 bheads = [h for h in bheads if h in fbheads]
3323 bheads = [h for h in bheads if h in fbheads]
3324 return bheads
3324 return bheads
3325
3325
3326 def branches(self, nodes):
3326 def branches(self, nodes):
3327 if not nodes:
3327 if not nodes:
3328 nodes = [self.changelog.tip()]
3328 nodes = [self.changelog.tip()]
3329 b = []
3329 b = []
3330 for n in nodes:
3330 for n in nodes:
3331 t = n
3331 t = n
3332 while True:
3332 while True:
3333 p = self.changelog.parents(n)
3333 p = self.changelog.parents(n)
3334 if p[1] != nullid or p[0] == nullid:
3334 if p[1] != nullid or p[0] == nullid:
3335 b.append((t, n, p[0], p[1]))
3335 b.append((t, n, p[0], p[1]))
3336 break
3336 break
3337 n = p[0]
3337 n = p[0]
3338 return b
3338 return b
3339
3339
3340 def between(self, pairs):
3341 r = []
3342
3343 for top, bottom in pairs:
3344 n, l, i = top, [], 0
3345 f = 1
3346
3347 while n != bottom and n != nullid:
3348 p = self.changelog.parents(n)[0]
3349 if i == f:
3350 l.append(n)
3351 f = f * 2
3352 n = p
3353 i += 1
3354
3355 r.append(l)
3356
3357 return r
3358
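# --- editor's illustrative sketch (not part of localrepo.py) ----------------
# A hedged restatement of the sampling loop in between() above over a toy
# linear history, showing that it records ancestors at distances 1, 2, 4,
# 8, ... from "top". The parent map and the helper name are hypothetical.
def sample_between(top, bottom, parents):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parents.get(n)
        if i == f:
            l.append(n)
            f *= 2
        n = p
        i += 1
    return l

# sample_between(10, 0, {k: k - 1 for k in range(1, 11)}) -> [9, 8, 6, 2]
# i.e. the ancestors 1, 2, 4 and 8 steps away from node 10.
# -----------------------------------------------------------------------------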
3359 def checkpush(self, pushop):
3360 """Extensions can override this function if additional checks have
3361 to be performed before pushing, or call it if they override the push
3362 command.
3363 """
3364
3365 @unfilteredpropertycache
3366 def prepushoutgoinghooks(self):
3367 """Return a util.hooks object whose hooks are called with a pushop
3368 (carrying repo, remote, and outgoing) before changesets are pushed.
3369 """
3370 return util.hooks()
3371
3372 def pushkey(self, namespace, key, old, new):
3372 def pushkey(self, namespace, key, old, new):
3373 try:
3373 try:
3374 tr = self.currenttransaction()
3374 tr = self.currenttransaction()
3375 hookargs = {}
3375 hookargs = {}
3376 if tr is not None:
3376 if tr is not None:
3377 hookargs.update(tr.hookargs)
3377 hookargs.update(tr.hookargs)
3378 hookargs = pycompat.strkwargs(hookargs)
3378 hookargs = pycompat.strkwargs(hookargs)
3379 hookargs['namespace'] = namespace
3379 hookargs['namespace'] = namespace
3380 hookargs['key'] = key
3380 hookargs['key'] = key
3381 hookargs['old'] = old
3381 hookargs['old'] = old
3382 hookargs['new'] = new
3382 hookargs['new'] = new
3383 self.hook(b'prepushkey', throw=True, **hookargs)
3383 self.hook(b'prepushkey', throw=True, **hookargs)
3384 except error.HookAbort as exc:
3384 except error.HookAbort as exc:
3385 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3385 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3386 if exc.hint:
3386 if exc.hint:
3387 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3387 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3388 return False
3388 return False
3389 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3389 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3390 ret = pushkey.push(self, namespace, key, old, new)
3390 ret = pushkey.push(self, namespace, key, old, new)
3391
3391
3392 def runhook(unused_success):
3392 def runhook(unused_success):
3393 self.hook(
3393 self.hook(
3394 b'pushkey',
3394 b'pushkey',
3395 namespace=namespace,
3395 namespace=namespace,
3396 key=key,
3396 key=key,
3397 old=old,
3397 old=old,
3398 new=new,
3398 new=new,
3399 ret=ret,
3399 ret=ret,
3400 )
3400 )
3401
3401
3402 self._afterlock(runhook)
3402 self._afterlock(runhook)
3403 return ret
3403 return ret
3404
3404
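# --- editor's illustrative sketch (not part of localrepo.py) ----------------
# A hedged example of exercising the pushkey()/listkeys() pair above through
# the standard "bookmarks" pushkey namespace. The repo object and newnode
# value are assumed to exist already; the helper name is hypothetical.
from mercurial.node import hex as _hex

def example_move_bookmark(repo, name, newnode):
    marks = repo.listkeys(b'bookmarks')   # bookmark name -> hex node
    old = marks.get(name, b'')            # empty string means "create"
    return repo.pushkey(b'bookmarks', name, old, _hex(newnode))
# -----------------------------------------------------------------------------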
3405 def listkeys(self, namespace):
3405 def listkeys(self, namespace):
3406 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3406 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3407 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3407 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3408 values = pushkey.list(self, namespace)
3408 values = pushkey.list(self, namespace)
3409 self.hook(b'listkeys', namespace=namespace, values=values)
3409 self.hook(b'listkeys', namespace=namespace, values=values)
3410 return values
3410 return values
3411
3411
3412 def debugwireargs(self, one, two, three=None, four=None, five=None):
3412 def debugwireargs(self, one, two, three=None, four=None, five=None):
3413 '''used to test argument passing over the wire'''
3413 '''used to test argument passing over the wire'''
3414 return b"%s %s %s %s %s" % (
3414 return b"%s %s %s %s %s" % (
3415 one,
3415 one,
3416 two,
3416 two,
3417 pycompat.bytestr(three),
3417 pycompat.bytestr(three),
3418 pycompat.bytestr(four),
3418 pycompat.bytestr(four),
3419 pycompat.bytestr(five),
3419 pycompat.bytestr(five),
3420 )
3420 )
3421
3421
3422 def savecommitmessage(self, text):
3422 def savecommitmessage(self, text):
3423 fp = self.vfs(b'last-message.txt', b'wb')
3423 fp = self.vfs(b'last-message.txt', b'wb')
3424 try:
3424 try:
3425 fp.write(text)
3425 fp.write(text)
3426 finally:
3426 finally:
3427 fp.close()
3427 fp.close()
3428 return self.pathto(fp.name[len(self.root) + 1 :])
3428 return self.pathto(fp.name[len(self.root) + 1 :])
3429
3429
3430
3431 # used to avoid circular references so destructors work
3432 def aftertrans(files):
3433 renamefiles = [tuple(t) for t in files]
3434
3435 def a():
3436 for vfs, src, dest in renamefiles:
3437 # if src and dest refer to the same file, vfs.rename is a no-op,
3438 # leaving both src and dest on disk. delete dest to make sure
3439 # the rename cannot be such a no-op.
3440 vfs.tryunlink(dest)
3441 try:
3442 vfs.rename(src, dest)
3443 except OSError: # journal file does not yet exist
3444 pass
3445
3446 return a
3447
3448
3449 def undoname(fn):
3450 base, name = os.path.split(fn)
3451 assert name.startswith(b'journal')
3452 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3453
3454
3455 def instance(ui, path, create, intents=None, createopts=None):
3456 localpath = util.urllocalpath(path)
3457 if create:
3458 createrepository(ui, localpath, createopts=createopts)
3459
3460 return makelocalrepository(ui, localpath, intents=intents)
3461
3462
3463 def islocal(path):
3463 def islocal(path):
3464 return True
3464 return True
3465
3465
3466
3466
3467 def defaultcreateopts(ui, createopts=None):
3467 def defaultcreateopts(ui, createopts=None):
3468 """Populate the default creation options for a repository.
3468 """Populate the default creation options for a repository.
3469
3469
3470 A dictionary of explicitly requested creation options can be passed
3470 A dictionary of explicitly requested creation options can be passed
3471 in. Missing keys will be populated.
3471 in. Missing keys will be populated.
3472 """
3472 """
3473 createopts = dict(createopts or {})
3473 createopts = dict(createopts or {})
3474
3474
3475 if b'backend' not in createopts:
3475 if b'backend' not in createopts:
3476 # experimental config: storage.new-repo-backend
3476 # experimental config: storage.new-repo-backend
3477 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3477 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3478
3478
3479 return createopts
3479 return createopts
3480
3480
3481
3481
3482 def newreporequirements(ui, createopts):
3482 def newreporequirements(ui, createopts):
3483 """Determine the set of requirements for a new local repository.
3483 """Determine the set of requirements for a new local repository.
3484
3484
3485 Extensions can wrap this function to specify custom requirements for
3485 Extensions can wrap this function to specify custom requirements for
3486 new repositories.
3486 new repositories.
3487 """
3487 """
3488 # If the repo is being created from a shared repository, we copy
3488 # If the repo is being created from a shared repository, we copy
3489 # its requirements.
3489 # its requirements.
3490 if b'sharedrepo' in createopts:
3490 if b'sharedrepo' in createopts:
3491 requirements = set(createopts[b'sharedrepo'].requirements)
3491 requirements = set(createopts[b'sharedrepo'].requirements)
3492 if createopts.get(b'sharedrelative'):
3492 if createopts.get(b'sharedrelative'):
3493 requirements.add(b'relshared')
3493 requirements.add(b'relshared')
3494 else:
3494 else:
3495 requirements.add(b'shared')
3495 requirements.add(b'shared')
3496
3496
3497 return requirements
3497 return requirements
3498
3498
3499 if b'backend' not in createopts:
3499 if b'backend' not in createopts:
3500 raise error.ProgrammingError(
3500 raise error.ProgrammingError(
3501 b'backend key not present in createopts; '
3501 b'backend key not present in createopts; '
3502 b'was defaultcreateopts() called?'
3502 b'was defaultcreateopts() called?'
3503 )
3503 )
3504
3504
3505 if createopts[b'backend'] != b'revlogv1':
3505 if createopts[b'backend'] != b'revlogv1':
3506 raise error.Abort(
3506 raise error.Abort(
3507 _(
3507 _(
3508 b'unable to determine repository requirements for '
3508 b'unable to determine repository requirements for '
3509 b'storage backend: %s'
3509 b'storage backend: %s'
3510 )
3510 )
3511 % createopts[b'backend']
3511 % createopts[b'backend']
3512 )
3512 )
3513
3513
3514 requirements = {b'revlogv1'}
3514 requirements = {b'revlogv1'}
3515 if ui.configbool(b'format', b'usestore'):
3515 if ui.configbool(b'format', b'usestore'):
3516 requirements.add(b'store')
3516 requirements.add(b'store')
3517 if ui.configbool(b'format', b'usefncache'):
3517 if ui.configbool(b'format', b'usefncache'):
3518 requirements.add(b'fncache')
3518 requirements.add(b'fncache')
3519 if ui.configbool(b'format', b'dotencode'):
3519 if ui.configbool(b'format', b'dotencode'):
3520 requirements.add(b'dotencode')
3520 requirements.add(b'dotencode')
3521
3521
3522 compengine = ui.config(b'format', b'revlog-compression')
3522 compengine = ui.config(b'format', b'revlog-compression')
3523 if compengine not in util.compengines:
3523 if compengine not in util.compengines:
3524 raise error.Abort(
3524 raise error.Abort(
3525 _(
3525 _(
3526 b'compression engine %s defined by '
3526 b'compression engine %s defined by '
3527 b'format.revlog-compression not available'
3527 b'format.revlog-compression not available'
3528 )
3528 )
3529 % compengine,
3529 % compengine,
3530 hint=_(
3530 hint=_(
3531 b'run "hg debuginstall" to list available '
3531 b'run "hg debuginstall" to list available '
3532 b'compression engines'
3532 b'compression engines'
3533 ),
3533 ),
3534 )
3534 )
3535
3535
3536 # zlib is the historical default and doesn't need an explicit requirement.
3536 # zlib is the historical default and doesn't need an explicit requirement.
3537 elif compengine == b'zstd':
3537 elif compengine == b'zstd':
3538 requirements.add(b'revlog-compression-zstd')
3538 requirements.add(b'revlog-compression-zstd')
3539 elif compengine != b'zlib':
3539 elif compengine != b'zlib':
3540 requirements.add(b'exp-compression-%s' % compengine)
3540 requirements.add(b'exp-compression-%s' % compengine)
3541
3541
3542 if scmutil.gdinitconfig(ui):
3542 if scmutil.gdinitconfig(ui):
3543 requirements.add(b'generaldelta')
3543 requirements.add(b'generaldelta')
3544 if ui.configbool(b'format', b'sparse-revlog'):
3544 if ui.configbool(b'format', b'sparse-revlog'):
3545 requirements.add(SPARSEREVLOG_REQUIREMENT)
3545 requirements.add(SPARSEREVLOG_REQUIREMENT)
3546
3546
3547 # experimental config: format.exp-use-side-data
3547 # experimental config: format.exp-use-side-data
3548 if ui.configbool(b'format', b'exp-use-side-data'):
3548 if ui.configbool(b'format', b'exp-use-side-data'):
3549 requirements.add(SIDEDATA_REQUIREMENT)
3549 requirements.add(SIDEDATA_REQUIREMENT)
3550 # experimental config: format.exp-use-copies-side-data-changeset
3550 # experimental config: format.exp-use-copies-side-data-changeset
3551 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3551 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3552 requirements.add(SIDEDATA_REQUIREMENT)
3552 requirements.add(SIDEDATA_REQUIREMENT)
3553 requirements.add(COPIESSDC_REQUIREMENT)
3553 requirements.add(COPIESSDC_REQUIREMENT)
3554 if ui.configbool(b'experimental', b'treemanifest'):
3554 if ui.configbool(b'experimental', b'treemanifest'):
3555 requirements.add(b'treemanifest')
3555 requirements.add(b'treemanifest')
3556
3556
3557 revlogv2 = ui.config(b'experimental', b'revlogv2')
3557 revlogv2 = ui.config(b'experimental', b'revlogv2')
3558 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3558 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3559 requirements.remove(b'revlogv1')
3559 requirements.remove(b'revlogv1')
3560 # generaldelta is implied by revlogv2.
3560 # generaldelta is implied by revlogv2.
3561 requirements.discard(b'generaldelta')
3561 requirements.discard(b'generaldelta')
3562 requirements.add(REVLOGV2_REQUIREMENT)
3562 requirements.add(REVLOGV2_REQUIREMENT)
3563 # experimental config: format.internal-phase
3563 # experimental config: format.internal-phase
3564 if ui.configbool(b'format', b'internal-phase'):
3564 if ui.configbool(b'format', b'internal-phase'):
3565 requirements.add(b'internal-phase')
3565 requirements.add(b'internal-phase')
3566
3566
3567 if createopts.get(b'narrowfiles'):
3567 if createopts.get(b'narrowfiles'):
3568 requirements.add(repository.NARROW_REQUIREMENT)
3568 requirements.add(repository.NARROW_REQUIREMENT)
3569
3569
3570 if createopts.get(b'lfs'):
3570 if createopts.get(b'lfs'):
3571 requirements.add(b'lfs')
3571 requirements.add(b'lfs')
3572
3572
3573 if ui.configbool(b'format', b'bookmarks-in-store'):
3573 if ui.configbool(b'format', b'bookmarks-in-store'):
3574 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3574 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3575
3575
3576 return requirements
3576 return requirements
3577
3577
3578
3578
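# --- editor's note (not part of localrepo.py) --------------------------------
# Hedged illustration: with a stock configuration and no extra extensions,
# the function above typically returns something like
#     {b'revlogv1', b'store', b'fncache', b'dotencode',
#      b'generaldelta', b'sparserevlog'}
# but the exact set depends on the ui configuration and loaded extensions.
# -----------------------------------------------------------------------------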
3579 def filterknowncreateopts(ui, createopts):
3579 def filterknowncreateopts(ui, createopts):
3580 """Filters a dict of repo creation options against options that are known.
3580 """Filters a dict of repo creation options against options that are known.
3581
3581
3582 Receives a dict of repo creation options and returns a dict of those
3582 Receives a dict of repo creation options and returns a dict of those
3583 options that we don't know how to handle.
3583 options that we don't know how to handle.
3584
3584
3585 This function is called as part of repository creation. If the
3585 This function is called as part of repository creation. If the
3586 returned dict contains any items, repository creation will not
3586 returned dict contains any items, repository creation will not
3587 be allowed, as it means there was a request to create a repository
3587 be allowed, as it means there was a request to create a repository
3588 with options not recognized by loaded code.
3588 with options not recognized by loaded code.
3589
3589
3590 Extensions can wrap this function to filter out creation options
3590 Extensions can wrap this function to filter out creation options
3591 they know how to handle.
3591 they know how to handle.
3592 """
3592 """
3593 known = {
3593 known = {
3594 b'backend',
3594 b'backend',
3595 b'lfs',
3595 b'lfs',
3596 b'narrowfiles',
3596 b'narrowfiles',
3597 b'sharedrepo',
3597 b'sharedrepo',
3598 b'sharedrelative',
3598 b'sharedrelative',
3599 b'shareditems',
3599 b'shareditems',
3600 b'shallowfilestore',
3600 b'shallowfilestore',
3601 }
3601 }
3602
3602
3603 return {k: v for k, v in createopts.items() if k not in known}
3603 return {k: v for k, v in createopts.items() if k not in known}
3604
3604
3605
3605
3606 def createrepository(ui, path, createopts=None):
3606 def createrepository(ui, path, createopts=None):
3607 """Create a new repository in a vfs.
3607 """Create a new repository in a vfs.
3608
3608
3609 ``path`` path to the new repo's working directory.
3609 ``path`` path to the new repo's working directory.
3610 ``createopts`` options for the new repository.
3610 ``createopts`` options for the new repository.
3611
3611
3612 The following keys for ``createopts`` are recognized:
3612 The following keys for ``createopts`` are recognized:
3613
3613
3614 backend
3614 backend
3615 The storage backend to use.
3615 The storage backend to use.
3616 lfs
3616 lfs
3617 Repository will be created with ``lfs`` requirement. The lfs extension
3617 Repository will be created with ``lfs`` requirement. The lfs extension
3618 will automatically be loaded when the repository is accessed.
3618 will automatically be loaded when the repository is accessed.
3619 narrowfiles
3619 narrowfiles
3620 Set up repository to support narrow file storage.
3620 Set up repository to support narrow file storage.
3621 sharedrepo
3621 sharedrepo
3622 Repository object from which storage should be shared.
3622 Repository object from which storage should be shared.
3623 sharedrelative
3623 sharedrelative
3624 Boolean indicating if the path to the shared repo should be
3624 Boolean indicating if the path to the shared repo should be
3625 stored as relative. By default, the pointer to the "parent" repo
3625 stored as relative. By default, the pointer to the "parent" repo
3626 is stored as an absolute path.
3626 is stored as an absolute path.
3627 shareditems
3627 shareditems
3628 Set of items to share to the new repository (in addition to storage).
3628 Set of items to share to the new repository (in addition to storage).
3629 shallowfilestore
3629 shallowfilestore
3630 Indicates that storage for files should be shallow (not all ancestor
3630 Indicates that storage for files should be shallow (not all ancestor
3631 revisions are known).
3631 revisions are known).
3632 """
3632 """
3633 createopts = defaultcreateopts(ui, createopts=createopts)
3633 createopts = defaultcreateopts(ui, createopts=createopts)
3634
3634
3635 unknownopts = filterknowncreateopts(ui, createopts)
3635 unknownopts = filterknowncreateopts(ui, createopts)
3636
3636
3637 if not isinstance(unknownopts, dict):
3637 if not isinstance(unknownopts, dict):
3638 raise error.ProgrammingError(
3638 raise error.ProgrammingError(
3639 b'filterknowncreateopts() did not return a dict'
3639 b'filterknowncreateopts() did not return a dict'
3640 )
3640 )
3641
3641
3642 if unknownopts:
3642 if unknownopts:
3643 raise error.Abort(
3643 raise error.Abort(
3644 _(
3644 _(
3645 b'unable to create repository because of unknown '
3645 b'unable to create repository because of unknown '
3646 b'creation option: %s'
3646 b'creation option: %s'
3647 )
3647 )
3648 % b', '.join(sorted(unknownopts)),
3648 % b', '.join(sorted(unknownopts)),
3649 hint=_(b'is a required extension not loaded?'),
3649 hint=_(b'is a required extension not loaded?'),
3650 )
3650 )
3651
3651
3652 requirements = newreporequirements(ui, createopts=createopts)
3652 requirements = newreporequirements(ui, createopts=createopts)
3653
3653
3654 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3654 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3655
3655
3656 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3656 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3657 if hgvfs.exists():
3657 if hgvfs.exists():
3658 raise error.RepoError(_(b'repository %s already exists') % path)
3658 raise error.RepoError(_(b'repository %s already exists') % path)
3659
3659
3660 if b'sharedrepo' in createopts:
3660 if b'sharedrepo' in createopts:
3661 sharedpath = createopts[b'sharedrepo'].sharedpath
3661 sharedpath = createopts[b'sharedrepo'].sharedpath
3662
3662
3663 if createopts.get(b'sharedrelative'):
3663 if createopts.get(b'sharedrelative'):
3664 try:
3664 try:
3665 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3665 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3666 except (IOError, ValueError) as e:
3666 except (IOError, ValueError) as e:
3667 # ValueError is raised on Windows if the drive letters differ
3667 # ValueError is raised on Windows if the drive letters differ
3668 # on each path.
3668 # on each path.
3669 raise error.Abort(
3669 raise error.Abort(
3670 _(b'cannot calculate relative path'),
3670 _(b'cannot calculate relative path'),
3671 hint=stringutil.forcebytestr(e),
3671 hint=stringutil.forcebytestr(e),
3672 )
3672 )
3673
3673
3674 if not wdirvfs.exists():
3674 if not wdirvfs.exists():
3675 wdirvfs.makedirs()
3675 wdirvfs.makedirs()
3676
3676
3677 hgvfs.makedir(notindexed=True)
3677 hgvfs.makedir(notindexed=True)
3678 if b'sharedrepo' not in createopts:
3678 if b'sharedrepo' not in createopts:
3679 hgvfs.mkdir(b'cache')
3679 hgvfs.mkdir(b'cache')
3680 hgvfs.mkdir(b'wcache')
3680 hgvfs.mkdir(b'wcache')
3681
3681
3682 if b'store' in requirements and b'sharedrepo' not in createopts:
3682 if b'store' in requirements and b'sharedrepo' not in createopts:
3683 hgvfs.mkdir(b'store')
3683 hgvfs.mkdir(b'store')
3684
3684
3685 # We create an invalid changelog outside the store so very old
3685 # We create an invalid changelog outside the store so very old
3686 # Mercurial versions (which didn't know about the requirements
3686 # Mercurial versions (which didn't know about the requirements
3687 # file) encounter an error on reading the changelog. This
3687 # file) encounter an error on reading the changelog. This
3688 # effectively locks out old clients and prevents them from
3688 # effectively locks out old clients and prevents them from
3689 # mucking with a repo in an unknown format.
3689 # mucking with a repo in an unknown format.
3690 #
3690 #
3691 # The revlog header has version 2, which won't be recognized by
3691 # The revlog header has version 2, which won't be recognized by
3692 # such old clients.
3692 # such old clients.
3693 hgvfs.append(
3693 hgvfs.append(
3694 b'00changelog.i',
3694 b'00changelog.i',
3695 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3695 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3696 b'layout',
3696 b'layout',
3697 )
3697 )
3698
3698
3699 scmutil.writerequires(hgvfs, requirements)
3699 scmutil.writerequires(hgvfs, requirements)
3700
3700
3701 # Write out file telling readers where to find the shared store.
3701 # Write out file telling readers where to find the shared store.
3702 if b'sharedrepo' in createopts:
3702 if b'sharedrepo' in createopts:
3703 hgvfs.write(b'sharedpath', sharedpath)
3703 hgvfs.write(b'sharedpath', sharedpath)
3704
3704
3705 if createopts.get(b'shareditems'):
3705 if createopts.get(b'shareditems'):
3706 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3706 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3707 hgvfs.write(b'shared', shared)
3707 hgvfs.write(b'shared', shared)
3708
3708
3709
3709
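Putting the creation options documented above into practice is a single call. The snippet below is a minimal sketch, assuming this hunk belongs to mercurial.localrepo.createrepository(ui, path, createopts=None); the target path and the chosen option are purely illustrative.

    from mercurial import localrepo
    from mercurial import ui as uimod

    ui = uimod.ui.load()
    # paths and option keys are bytes in Mercurial's internal API
    localrepo.createrepository(
        ui,
        b'/tmp/newrepo',
        createopts={b'narrowfiles': True},  # record the narrow-storage requirement
    )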
3710 def poisonrepository(repo):
3710 def poisonrepository(repo):
3711 """Poison a repository instance so it can no longer be used."""
3711 """Poison a repository instance so it can no longer be used."""
3712 # Perform any cleanup on the instance.
3712 # Perform any cleanup on the instance.
3713 repo.close()
3713 repo.close()
3714
3714
3715 # Our strategy is to replace the type of the object with one that
3715 # Our strategy is to replace the type of the object with one that
3716 # has all attribute lookups result in error.
3716 # has all attribute lookups result in error.
3717 #
3717 #
3718 # But we have to allow the close() method because some constructors
3718 # But we have to allow the close() method because some constructors
3719 # of repos call close() on repo references.
3719 # of repos call close() on repo references.
3720 class poisonedrepository(object):
3720 class poisonedrepository(object):
3721 def __getattribute__(self, item):
3721 def __getattribute__(self, item):
3722 if item == 'close':
3722 if item == 'close':
3723 return object.__getattribute__(self, item)
3723 return object.__getattribute__(self, item)
3724
3724
3725 raise error.ProgrammingError(
3725 raise error.ProgrammingError(
3726 b'repo instances should not be used after unshare'
3726 b'repo instances should not be used after unshare'
3727 )
3727 )
3728
3728
3729 def close(self):
3729 def close(self):
3730 pass
3730 pass
3731
3731
3732 # We may have a repoview, which intercepts __setattr__. So be sure
3732 # We may have a repoview, which intercepts __setattr__. So be sure
3733 # we operate at the lowest level possible.
3733 # we operate at the lowest level possible.
3734 object.__setattr__(repo, '__class__', poisonedrepository)
3734 object.__setattr__(repo, '__class__', poisonedrepository)
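The class swap above is plain Python and can be reproduced in isolation. A self-contained sketch of the same poisoning technique (generic names, not taken from this codebase):

    class _Poisoned(object):
        """Every attribute lookup fails except close(), mirroring the idea above."""

        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)
            raise RuntimeError('object used after being poisoned')

        def close(self):
            pass


    class Resource(object):
        def work(self):
            return 42

        def close(self):
            pass


    r = Resource()
    assert r.work() == 42
    object.__setattr__(r, '__class__', _Poisoned)  # bypasses any custom __setattr__
    r.close()  # still permitted
    try:
        r.work()
    except RuntimeError:
        pass  # any other use now fails loudly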
@@ -1,2712 +1,2712 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
12 import shutil
11 import shutil
13 import stat
12 import stat
14 import struct
13 import struct
15
14
16 from .i18n import _
15 from .i18n import _
17 from .node import (
16 from .node import (
18 addednodeid,
17 addednodeid,
19 bin,
18 bin,
20 hex,
19 hex,
21 modifiednodeid,
20 modifiednodeid,
22 nullhex,
21 nullhex,
23 nullid,
22 nullid,
24 nullrev,
23 nullrev,
25 )
24 )
26 from .pycompat import delattr
25 from .pycompat import delattr
27 from .thirdparty import attr
26 from .thirdparty import attr
28 from . import (
27 from . import (
29 copies,
28 copies,
30 encoding,
29 encoding,
31 error,
30 error,
32 filemerge,
31 filemerge,
33 match as matchmod,
32 match as matchmod,
34 obsutil,
33 obsutil,
35 pathutil,
34 pathutil,
36 pycompat,
35 pycompat,
37 scmutil,
36 scmutil,
38 subrepoutil,
37 subrepoutil,
39 util,
38 util,
40 worker,
39 worker,
41 )
40 )
41 from .utils import hashutil
42
42
43 _pack = struct.pack
43 _pack = struct.pack
44 _unpack = struct.unpack
44 _unpack = struct.unpack
45
45
46
46
47 def _droponode(data):
47 def _droponode(data):
48 # used for compatibility for v1
48 # used for compatibility for v1
49 bits = data.split(b'\0')
49 bits = data.split(b'\0')
50 bits = bits[:-2] + bits[-1:]
50 bits = bits[:-2] + bits[-1:]
51 return b'\0'.join(bits)
51 return b'\0'.join(bits)
52
52
53
53
54 # Merge state record types. See ``mergestate`` docs for more.
54 # Merge state record types. See ``mergestate`` docs for more.
55 RECORD_LOCAL = b'L'
55 RECORD_LOCAL = b'L'
56 RECORD_OTHER = b'O'
56 RECORD_OTHER = b'O'
57 RECORD_MERGED = b'F'
57 RECORD_MERGED = b'F'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
60 RECORD_PATH_CONFLICT = b'P'
60 RECORD_PATH_CONFLICT = b'P'
61 RECORD_MERGE_DRIVER_STATE = b'm'
61 RECORD_MERGE_DRIVER_STATE = b'm'
62 RECORD_FILE_VALUES = b'f'
62 RECORD_FILE_VALUES = b'f'
63 RECORD_LABELS = b'l'
63 RECORD_LABELS = b'l'
64 RECORD_OVERRIDE = b't'
64 RECORD_OVERRIDE = b't'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
67
67
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
69 MERGE_DRIVER_STATE_MARKED = b'm'
69 MERGE_DRIVER_STATE_MARKED = b'm'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
71
71
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
77
77
78 ACTION_FORGET = b'f'
78 ACTION_FORGET = b'f'
79 ACTION_REMOVE = b'r'
79 ACTION_REMOVE = b'r'
80 ACTION_ADD = b'a'
80 ACTION_ADD = b'a'
81 ACTION_GET = b'g'
81 ACTION_GET = b'g'
82 ACTION_PATH_CONFLICT = b'p'
82 ACTION_PATH_CONFLICT = b'p'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
84 ACTION_ADD_MODIFIED = b'am'
84 ACTION_ADD_MODIFIED = b'am'
85 ACTION_CREATED = b'c'
85 ACTION_CREATED = b'c'
86 ACTION_DELETED_CHANGED = b'dc'
86 ACTION_DELETED_CHANGED = b'dc'
87 ACTION_CHANGED_DELETED = b'cd'
87 ACTION_CHANGED_DELETED = b'cd'
88 ACTION_MERGE = b'm'
88 ACTION_MERGE = b'm'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
91 ACTION_KEEP = b'k'
91 ACTION_KEEP = b'k'
92 ACTION_EXEC = b'e'
92 ACTION_EXEC = b'e'
93 ACTION_CREATED_MERGE = b'cm'
93 ACTION_CREATED_MERGE = b'cm'
94
94
95
95
96 class mergestate(object):
96 class mergestate(object):
97 '''track 3-way merge state of individual files
97 '''track 3-way merge state of individual files
98
98
99 The merge state is stored on disk when needed. Two files are used: one with
99 The merge state is stored on disk when needed. Two files are used: one with
100 an old format (version 1), and one with a new format (version 2). Version 2
100 an old format (version 1), and one with a new format (version 2). Version 2
101 stores a superset of the data in version 1, including new kinds of records
101 stores a superset of the data in version 1, including new kinds of records
102 in the future. For more about the new format, see the documentation for
102 in the future. For more about the new format, see the documentation for
103 `_readrecordsv2`.
103 `_readrecordsv2`.
104
104
105 Each record can contain arbitrary content, and has an associated type. This
105 Each record can contain arbitrary content, and has an associated type. This
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 versions of Mercurial that don't support it should abort. If `type` is
107 versions of Mercurial that don't support it should abort. If `type` is
108 lowercase, the record can be safely ignored.
108 lowercase, the record can be safely ignored.
109
109
110 Currently known records:
110 Currently known records:
111
111
112 L: the node of the "local" part of the merge (hexified version)
112 L: the node of the "local" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
114 F: a file to be merged entry
114 F: a file to be merged entry
115 C: a change/delete or delete/change conflict
115 C: a change/delete or delete/change conflict
116 D: a file that the external merge driver will merge internally
116 D: a file that the external merge driver will merge internally
117 (experimental)
117 (experimental)
118 P: a path conflict (file vs directory)
118 P: a path conflict (file vs directory)
119 m: the external merge driver defined for this merge plus its run state
119 m: the external merge driver defined for this merge plus its run state
120 (experimental)
120 (experimental)
121 f: a (filename, dictionary) tuple of optional values for a given file
121 f: a (filename, dictionary) tuple of optional values for a given file
122 X: unsupported mandatory record type (used in tests)
122 X: unsupported mandatory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
124 l: the labels for the parts of the merge.
124 l: the labels for the parts of the merge.
125
125
126 Merge driver run states (experimental):
126 Merge driver run states (experimental):
127 u: driver-resolved files unmarked -- needs to be run next time we're about
127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 to resolve or commit
128 to resolve or commit
129 m: driver-resolved files marked -- only needs to be run before commit
129 m: driver-resolved files marked -- only needs to be run before commit
130 s: success/skipped -- does not need to be run any more
130 s: success/skipped -- does not need to be run any more
131
131
132 Merge record states (stored in self._state, indexed by filename):
132 Merge record states (stored in self._state, indexed by filename):
133 u: unresolved conflict
133 u: unresolved conflict
134 r: resolved conflict
134 r: resolved conflict
135 pu: unresolved path conflict (file conflicts with directory)
135 pu: unresolved path conflict (file conflicts with directory)
136 pr: resolved path conflict
136 pr: resolved path conflict
137 d: driver-resolved conflict
137 d: driver-resolved conflict
138
138
139 The resolve command transitions between 'u' and 'r' for conflicts and
139 The resolve command transitions between 'u' and 'r' for conflicts and
140 'pu' and 'pr' for path conflicts.
140 'pu' and 'pr' for path conflicts.
141 '''
141 '''
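A hypothetical helper (not part of this class) shows how the per-file state codes listed above can be interpreted; it relies only on the documented meanings of 'u', 'r', 'pu', 'pr' and 'd':

    UNRESOLVED_STATES = {b'u', b'pu'}       # unresolved file / path conflicts
    RESOLVED_STATES = {b'r', b'pr', b'd'}   # resolved, path-resolved, driver-resolved

    def summarize(statemap):
        """statemap maps filename -> [statecode, ...] like mergestate._state."""
        pending = sorted(f for f, e in statemap.items() if e[0] in UNRESOLVED_STATES)
        done = sorted(f for f, e in statemap.items() if e[0] in RESOLVED_STATES)
        return pending, done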
142
142
143 statepathv1 = b'merge/state'
143 statepathv1 = b'merge/state'
144 statepathv2 = b'merge/state2'
144 statepathv2 = b'merge/state2'
145
145
146 @staticmethod
146 @staticmethod
147 def clean(repo, node=None, other=None, labels=None):
147 def clean(repo, node=None, other=None, labels=None):
148 """Initialize a brand new merge state, removing any existing state on
148 """Initialize a brand new merge state, removing any existing state on
149 disk."""
149 disk."""
150 ms = mergestate(repo)
150 ms = mergestate(repo)
151 ms.reset(node, other, labels)
151 ms.reset(node, other, labels)
152 return ms
152 return ms
153
153
154 @staticmethod
154 @staticmethod
155 def read(repo):
155 def read(repo):
156 """Initialize the merge state, reading it from disk."""
156 """Initialize the merge state, reading it from disk."""
157 ms = mergestate(repo)
157 ms = mergestate(repo)
158 ms._read()
158 ms._read()
159 return ms
159 return ms
160
160
161 def __init__(self, repo):
161 def __init__(self, repo):
162 """Initialize the merge state.
162 """Initialize the merge state.
163
163
164 Do not use this directly! Instead call read() or clean()."""
164 Do not use this directly! Instead call read() or clean()."""
165 self._repo = repo
165 self._repo = repo
166 self._dirty = False
166 self._dirty = False
167 self._labels = None
167 self._labels = None
168
168
169 def reset(self, node=None, other=None, labels=None):
169 def reset(self, node=None, other=None, labels=None):
170 self._state = {}
170 self._state = {}
171 self._stateextras = {}
171 self._stateextras = {}
172 self._local = None
172 self._local = None
173 self._other = None
173 self._other = None
174 self._labels = labels
174 self._labels = labels
175 for var in ('localctx', 'otherctx'):
175 for var in ('localctx', 'otherctx'):
176 if var in vars(self):
176 if var in vars(self):
177 delattr(self, var)
177 delattr(self, var)
178 if node:
178 if node:
179 self._local = node
179 self._local = node
180 self._other = other
180 self._other = other
181 self._readmergedriver = None
181 self._readmergedriver = None
182 if self.mergedriver:
182 if self.mergedriver:
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 else:
184 else:
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 self._results = {}
187 self._results = {}
188 self._dirty = False
188 self._dirty = False
189
189
190 def _read(self):
190 def _read(self):
191 """Analyse each record content to restore a serialized state from disk
191 """Analyse each record content to restore a serialized state from disk
192
192
193 This function processes "record" entries produced by the de-serialization
193 This function processes "record" entries produced by the de-serialization
194 of the on-disk file.
194 of the on-disk file.
195 """
195 """
196 self._state = {}
196 self._state = {}
197 self._stateextras = {}
197 self._stateextras = {}
198 self._local = None
198 self._local = None
199 self._other = None
199 self._other = None
200 for var in ('localctx', 'otherctx'):
200 for var in ('localctx', 'otherctx'):
201 if var in vars(self):
201 if var in vars(self):
202 delattr(self, var)
202 delattr(self, var)
203 self._readmergedriver = None
203 self._readmergedriver = None
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
205 unsupported = set()
205 unsupported = set()
206 records = self._readrecords()
206 records = self._readrecords()
207 for rtype, record in records:
207 for rtype, record in records:
208 if rtype == RECORD_LOCAL:
208 if rtype == RECORD_LOCAL:
209 self._local = bin(record)
209 self._local = bin(record)
210 elif rtype == RECORD_OTHER:
210 elif rtype == RECORD_OTHER:
211 self._other = bin(record)
211 self._other = bin(record)
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
213 bits = record.split(b'\0', 1)
213 bits = record.split(b'\0', 1)
214 mdstate = bits[1]
214 mdstate = bits[1]
215 if len(mdstate) != 1 or mdstate not in (
215 if len(mdstate) != 1 or mdstate not in (
216 MERGE_DRIVER_STATE_UNMARKED,
216 MERGE_DRIVER_STATE_UNMARKED,
217 MERGE_DRIVER_STATE_MARKED,
217 MERGE_DRIVER_STATE_MARKED,
218 MERGE_DRIVER_STATE_SUCCESS,
218 MERGE_DRIVER_STATE_SUCCESS,
219 ):
219 ):
220 # the merge driver should be idempotent, so just rerun it
220 # the merge driver should be idempotent, so just rerun it
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
222
222
223 self._readmergedriver = bits[0]
223 self._readmergedriver = bits[0]
224 self._mdstate = mdstate
224 self._mdstate = mdstate
225 elif rtype in (
225 elif rtype in (
226 RECORD_MERGED,
226 RECORD_MERGED,
227 RECORD_CHANGEDELETE_CONFLICT,
227 RECORD_CHANGEDELETE_CONFLICT,
228 RECORD_PATH_CONFLICT,
228 RECORD_PATH_CONFLICT,
229 RECORD_MERGE_DRIVER_MERGE,
229 RECORD_MERGE_DRIVER_MERGE,
230 ):
230 ):
231 bits = record.split(b'\0')
231 bits = record.split(b'\0')
232 self._state[bits[0]] = bits[1:]
232 self._state[bits[0]] = bits[1:]
233 elif rtype == RECORD_FILE_VALUES:
233 elif rtype == RECORD_FILE_VALUES:
234 filename, rawextras = record.split(b'\0', 1)
234 filename, rawextras = record.split(b'\0', 1)
235 extraparts = rawextras.split(b'\0')
235 extraparts = rawextras.split(b'\0')
236 extras = {}
236 extras = {}
237 i = 0
237 i = 0
238 while i < len(extraparts):
238 while i < len(extraparts):
239 extras[extraparts[i]] = extraparts[i + 1]
239 extras[extraparts[i]] = extraparts[i + 1]
240 i += 2
240 i += 2
241
241
242 self._stateextras[filename] = extras
242 self._stateextras[filename] = extras
243 elif rtype == RECORD_LABELS:
243 elif rtype == RECORD_LABELS:
244 labels = record.split(b'\0', 2)
244 labels = record.split(b'\0', 2)
245 self._labels = [l for l in labels if len(l) > 0]
245 self._labels = [l for l in labels if len(l) > 0]
246 elif not rtype.islower():
246 elif not rtype.islower():
247 unsupported.add(rtype)
247 unsupported.add(rtype)
248 self._results = {}
248 self._results = {}
249 self._dirty = False
249 self._dirty = False
250
250
251 if unsupported:
251 if unsupported:
252 raise error.UnsupportedMergeRecords(unsupported)
252 raise error.UnsupportedMergeRecords(unsupported)
253
253
254 def _readrecords(self):
254 def _readrecords(self):
255 """Read merge state from disk and return a list of record (TYPE, data)
255 """Read merge state from disk and return a list of record (TYPE, data)
256
256
257 We read data from both v1 and v2 files and decide which one to use.
257 We read data from both v1 and v2 files and decide which one to use.
258
258
259 V1 has been used by versions prior to 2.9.1 and contains less data than
259 V1 has been used by versions prior to 2.9.1 and contains less data than
260 v2. We read both versions and check if no data in v2 contradicts
260 v2. We read both versions and check if no data in v2 contradicts
261 v1. If there is no contradiction we can safely assume that both v1
261 v1. If there is no contradiction we can safely assume that both v1
262 and v2 were written at the same time and use the extra data in v2. If
262 and v2 were written at the same time and use the extra data in v2. If
263 there is a contradiction we ignore v2 content as we assume an old version
263 there is a contradiction we ignore v2 content as we assume an old version
264 of Mercurial has overwritten the mergestate file and left an old v2
264 of Mercurial has overwritten the mergestate file and left an old v2
265 file around.
265 file around.
266
266
267 returns list of record [(TYPE, data), ...]"""
267 returns list of record [(TYPE, data), ...]"""
268 v1records = self._readrecordsv1()
268 v1records = self._readrecordsv1()
269 v2records = self._readrecordsv2()
269 v2records = self._readrecordsv2()
270 if self._v1v2match(v1records, v2records):
270 if self._v1v2match(v1records, v2records):
271 return v2records
271 return v2records
272 else:
272 else:
273 # v1 file is newer than v2 file, use it
273 # v1 file is newer than v2 file, use it
274 # we have to infer the "other" changeset of the merge
274 # we have to infer the "other" changeset of the merge
275 # we cannot do better than that with v1 of the format
275 # we cannot do better than that with v1 of the format
276 mctx = self._repo[None].parents()[-1]
276 mctx = self._repo[None].parents()[-1]
277 v1records.append((RECORD_OTHER, mctx.hex()))
277 v1records.append((RECORD_OTHER, mctx.hex()))
278 # add placeholder "other" file node information
278 # add placeholder "other" file node information
279 # nobody is using it yet so we do not need to fetch the data
279 # nobody is using it yet so we do not need to fetch the data
280 # if mctx was wrong `mctx[bits[-2]]` may fail.
280 # if mctx was wrong `mctx[bits[-2]]` may fail.
281 for idx, r in enumerate(v1records):
281 for idx, r in enumerate(v1records):
282 if r[0] == RECORD_MERGED:
282 if r[0] == RECORD_MERGED:
283 bits = r[1].split(b'\0')
283 bits = r[1].split(b'\0')
284 bits.insert(-2, b'')
284 bits.insert(-2, b'')
285 v1records[idx] = (r[0], b'\0'.join(bits))
285 v1records[idx] = (r[0], b'\0'.join(bits))
286 return v1records
286 return v1records
287
287
288 def _v1v2match(self, v1records, v2records):
288 def _v1v2match(self, v1records, v2records):
289 oldv2 = set() # old format version of v2 record
289 oldv2 = set() # old format version of v2 record
290 for rec in v2records:
290 for rec in v2records:
291 if rec[0] == RECORD_LOCAL:
291 if rec[0] == RECORD_LOCAL:
292 oldv2.add(rec)
292 oldv2.add(rec)
293 elif rec[0] == RECORD_MERGED:
293 elif rec[0] == RECORD_MERGED:
294 # drop the onode data (not contained in v1)
294 # drop the onode data (not contained in v1)
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 for rec in v1records:
296 for rec in v1records:
297 if rec not in oldv2:
297 if rec not in oldv2:
298 return False
298 return False
299 else:
299 else:
300 return True
300 return True
301
301
302 def _readrecordsv1(self):
302 def _readrecordsv1(self):
303 """read on disk merge state for version 1 file
303 """read on disk merge state for version 1 file
304
304
305 returns list of record [(TYPE, data), ...]
305 returns list of record [(TYPE, data), ...]
306
306
307 Note: the "F" data from this file are one entry short
307 Note: the "F" data from this file are one entry short
308 (no "other file node" entry)
308 (no "other file node" entry)
309 """
309 """
310 records = []
310 records = []
311 try:
311 try:
312 f = self._repo.vfs(self.statepathv1)
312 f = self._repo.vfs(self.statepathv1)
313 for i, l in enumerate(f):
313 for i, l in enumerate(f):
314 if i == 0:
314 if i == 0:
315 records.append((RECORD_LOCAL, l[:-1]))
315 records.append((RECORD_LOCAL, l[:-1]))
316 else:
316 else:
317 records.append((RECORD_MERGED, l[:-1]))
317 records.append((RECORD_MERGED, l[:-1]))
318 f.close()
318 f.close()
319 except IOError as err:
319 except IOError as err:
320 if err.errno != errno.ENOENT:
320 if err.errno != errno.ENOENT:
321 raise
321 raise
322 return records
322 return records
323
323
324 def _readrecordsv2(self):
324 def _readrecordsv2(self):
325 """read on disk merge state for version 2 file
325 """read on disk merge state for version 2 file
326
326
327 This format is a list of arbitrary records of the form:
327 This format is a list of arbitrary records of the form:
328
328
329 [type][length][content]
329 [type][length][content]
330
330
331 `type` is a single character, `length` is a 4 byte integer, and
331 `type` is a single character, `length` is a 4 byte integer, and
332 `content` is an arbitrary byte sequence of length `length`.
332 `content` is an arbitrary byte sequence of length `length`.
333
333
334 Mercurial versions prior to 3.7 have a bug where if there are
334 Mercurial versions prior to 3.7 have a bug where if there are
335 unsupported mandatory merge records, attempting to clear out the merge
335 unsupported mandatory merge records, attempting to clear out the merge
336 state with hg update --clean or similar aborts. The 't' record type
336 state with hg update --clean or similar aborts. The 't' record type
337 works around that by writing out what those versions treat as an
337 works around that by writing out what those versions treat as an
338 advisory record, but later versions interpret it as special: the first
338 advisory record, but later versions interpret it as special: the first
339 character is the 'real' record type and everything onwards is the data.
339 character is the 'real' record type and everything onwards is the data.
340
340
341 Returns list of records [(TYPE, data), ...]."""
341 Returns list of records [(TYPE, data), ...]."""
342 records = []
342 records = []
343 try:
343 try:
344 f = self._repo.vfs(self.statepathv2)
344 f = self._repo.vfs(self.statepathv2)
345 data = f.read()
345 data = f.read()
346 off = 0
346 off = 0
347 end = len(data)
347 end = len(data)
348 while off < end:
348 while off < end:
349 rtype = data[off : off + 1]
349 rtype = data[off : off + 1]
350 off += 1
350 off += 1
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
352 off += 4
352 off += 4
353 record = data[off : (off + length)]
353 record = data[off : (off + length)]
354 off += length
354 off += length
355 if rtype == RECORD_OVERRIDE:
355 if rtype == RECORD_OVERRIDE:
356 rtype, record = record[0:1], record[1:]
356 rtype, record = record[0:1], record[1:]
357 records.append((rtype, record))
357 records.append((rtype, record))
358 f.close()
358 f.close()
359 except IOError as err:
359 except IOError as err:
360 if err.errno != errno.ENOENT:
360 if err.errno != errno.ENOENT:
361 raise
361 raise
362 return records
362 return records
363
363
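The [type][length][content] framing documented above is easy to decode outside Mercurial as well. A standalone sketch (hypothetical function name) that also unwraps the 't' override records:

    import struct

    def parserecords(data):
        """Split a merge/state2 byte string into (type, record) pairs."""
        records = []
        off, end = 0, len(data)
        while off < end:
            rtype = data[off:off + 1]            # 1-byte record type
            off += 1
            (length,) = struct.unpack(b'>I', data[off:off + 4])
            off += 4                             # 4-byte big-endian length
            record = data[off:off + length]
            off += length
            if rtype == b't':                    # override: real type is byte 0
                rtype, record = record[0:1], record[1:]
            records.append((rtype, record))
        return records

    # one mandatory 'L' record followed by an advisory 'x' record
    blob = b'L' + struct.pack(b'>I', 3) + b'abc' + b'x' + struct.pack(b'>I', 1) + b'!'
    assert parserecords(blob) == [(b'L', b'abc'), (b'x', b'!')]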
364 @util.propertycache
364 @util.propertycache
365 def mergedriver(self):
365 def mergedriver(self):
366 # protect against the following:
366 # protect against the following:
367 # - A configures a malicious merge driver in their hgrc, then
367 # - A configures a malicious merge driver in their hgrc, then
368 # pauses the merge
368 # pauses the merge
369 # - A edits their hgrc to remove references to the merge driver
369 # - A edits their hgrc to remove references to the merge driver
370 # - A gives a copy of their entire repo, including .hg, to B
370 # - A gives a copy of their entire repo, including .hg, to B
371 # - B inspects .hgrc and finds it to be clean
371 # - B inspects .hgrc and finds it to be clean
372 # - B then continues the merge and the malicious merge driver
372 # - B then continues the merge and the malicious merge driver
373 # gets invoked
373 # gets invoked
374 configmergedriver = self._repo.ui.config(
374 configmergedriver = self._repo.ui.config(
375 b'experimental', b'mergedriver'
375 b'experimental', b'mergedriver'
376 )
376 )
377 if (
377 if (
378 self._readmergedriver is not None
378 self._readmergedriver is not None
379 and self._readmergedriver != configmergedriver
379 and self._readmergedriver != configmergedriver
380 ):
380 ):
381 raise error.ConfigError(
381 raise error.ConfigError(
382 _(b"merge driver changed since merge started"),
382 _(b"merge driver changed since merge started"),
383 hint=_(b"revert merge driver change or abort merge"),
383 hint=_(b"revert merge driver change or abort merge"),
384 )
384 )
385
385
386 return configmergedriver
386 return configmergedriver
387
387
388 @util.propertycache
388 @util.propertycache
389 def localctx(self):
389 def localctx(self):
390 if self._local is None:
390 if self._local is None:
391 msg = b"localctx accessed but self._local isn't set"
391 msg = b"localctx accessed but self._local isn't set"
392 raise error.ProgrammingError(msg)
392 raise error.ProgrammingError(msg)
393 return self._repo[self._local]
393 return self._repo[self._local]
394
394
395 @util.propertycache
395 @util.propertycache
396 def otherctx(self):
396 def otherctx(self):
397 if self._other is None:
397 if self._other is None:
398 msg = b"otherctx accessed but self._other isn't set"
398 msg = b"otherctx accessed but self._other isn't set"
399 raise error.ProgrammingError(msg)
399 raise error.ProgrammingError(msg)
400 return self._repo[self._other]
400 return self._repo[self._other]
401
401
402 def active(self):
402 def active(self):
403 """Whether mergestate is active.
403 """Whether mergestate is active.
404
404
405 Returns True if there appears to be mergestate. This is a rough proxy
405 Returns True if there appears to be mergestate. This is a rough proxy
406 for "is a merge in progress."
406 for "is a merge in progress."
407 """
407 """
408 # Check local variables before looking at filesystem for performance
408 # Check local variables before looking at filesystem for performance
409 # reasons.
409 # reasons.
410 return (
410 return (
411 bool(self._local)
411 bool(self._local)
412 or bool(self._state)
412 or bool(self._state)
413 or self._repo.vfs.exists(self.statepathv1)
413 or self._repo.vfs.exists(self.statepathv1)
414 or self._repo.vfs.exists(self.statepathv2)
414 or self._repo.vfs.exists(self.statepathv2)
415 )
415 )
416
416
417 def commit(self):
417 def commit(self):
418 """Write current state on disk (if necessary)"""
418 """Write current state on disk (if necessary)"""
419 if self._dirty:
419 if self._dirty:
420 records = self._makerecords()
420 records = self._makerecords()
421 self._writerecords(records)
421 self._writerecords(records)
422 self._dirty = False
422 self._dirty = False
423
423
424 def _makerecords(self):
424 def _makerecords(self):
425 records = []
425 records = []
426 records.append((RECORD_LOCAL, hex(self._local)))
426 records.append((RECORD_LOCAL, hex(self._local)))
427 records.append((RECORD_OTHER, hex(self._other)))
427 records.append((RECORD_OTHER, hex(self._other)))
428 if self.mergedriver:
428 if self.mergedriver:
429 records.append(
429 records.append(
430 (
430 (
431 RECORD_MERGE_DRIVER_STATE,
431 RECORD_MERGE_DRIVER_STATE,
432 b'\0'.join([self.mergedriver, self._mdstate]),
432 b'\0'.join([self.mergedriver, self._mdstate]),
433 )
433 )
434 )
434 )
435 # Write out state items. In all cases, the value of the state map entry
435 # Write out state items. In all cases, the value of the state map entry
436 # is written as the contents of the record. The record type depends on
436 # is written as the contents of the record. The record type depends on
437 # the type of state that is stored, and capital-letter records are used
437 # the type of state that is stored, and capital-letter records are used
438 # to prevent older versions of Mercurial that do not support the feature
438 # to prevent older versions of Mercurial that do not support the feature
439 # from loading them.
439 # from loading them.
440 for filename, v in pycompat.iteritems(self._state):
440 for filename, v in pycompat.iteritems(self._state):
441 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
441 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
442 # Driver-resolved merge. These are stored in 'D' records.
442 # Driver-resolved merge. These are stored in 'D' records.
443 records.append(
443 records.append(
444 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
444 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
445 )
445 )
446 elif v[0] in (
446 elif v[0] in (
447 MERGE_RECORD_UNRESOLVED_PATH,
447 MERGE_RECORD_UNRESOLVED_PATH,
448 MERGE_RECORD_RESOLVED_PATH,
448 MERGE_RECORD_RESOLVED_PATH,
449 ):
449 ):
450 # Path conflicts. These are stored in 'P' records. The current
450 # Path conflicts. These are stored in 'P' records. The current
451 # resolution state ('pu' or 'pr') is stored within the record.
451 # resolution state ('pu' or 'pr') is stored within the record.
452 records.append(
452 records.append(
453 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
453 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
454 )
454 )
455 elif v[1] == nullhex or v[6] == nullhex:
455 elif v[1] == nullhex or v[6] == nullhex:
456 # Change/Delete or Delete/Change conflicts. These are stored in
456 # Change/Delete or Delete/Change conflicts. These are stored in
457 # 'C' records. v[1] is the local file, and is nullhex when the
457 # 'C' records. v[1] is the local file, and is nullhex when the
458 # file is deleted locally ('dc'). v[6] is the remote file, and
458 # file is deleted locally ('dc'). v[6] is the remote file, and
459 # is nullhex when the file is deleted remotely ('cd').
459 # is nullhex when the file is deleted remotely ('cd').
460 records.append(
460 records.append(
461 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
461 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
462 )
462 )
463 else:
463 else:
464 # Normal files. These are stored in 'F' records.
464 # Normal files. These are stored in 'F' records.
465 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
465 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
466 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
466 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
467 rawextras = b'\0'.join(
467 rawextras = b'\0'.join(
468 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
468 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
469 )
469 )
470 records.append(
470 records.append(
471 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
471 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
472 )
472 )
473 if self._labels is not None:
473 if self._labels is not None:
474 labels = b'\0'.join(self._labels)
474 labels = b'\0'.join(self._labels)
475 records.append((RECORD_LABELS, labels))
475 records.append((RECORD_LABELS, labels))
476 return records
476 return records
477
477
478 def _writerecords(self, records):
478 def _writerecords(self, records):
479 """Write current state on disk (both v1 and v2)"""
479 """Write current state on disk (both v1 and v2)"""
480 self._writerecordsv1(records)
480 self._writerecordsv1(records)
481 self._writerecordsv2(records)
481 self._writerecordsv2(records)
482
482
483 def _writerecordsv1(self, records):
483 def _writerecordsv1(self, records):
484 """Write current state on disk in a version 1 file"""
484 """Write current state on disk in a version 1 file"""
485 f = self._repo.vfs(self.statepathv1, b'wb')
485 f = self._repo.vfs(self.statepathv1, b'wb')
486 irecords = iter(records)
486 irecords = iter(records)
487 lrecords = next(irecords)
487 lrecords = next(irecords)
488 assert lrecords[0] == RECORD_LOCAL
488 assert lrecords[0] == RECORD_LOCAL
489 f.write(hex(self._local) + b'\n')
489 f.write(hex(self._local) + b'\n')
490 for rtype, data in irecords:
490 for rtype, data in irecords:
491 if rtype == RECORD_MERGED:
491 if rtype == RECORD_MERGED:
492 f.write(b'%s\n' % _droponode(data))
492 f.write(b'%s\n' % _droponode(data))
493 f.close()
493 f.close()
494
494
495 def _writerecordsv2(self, records):
495 def _writerecordsv2(self, records):
496 """Write current state on disk in a version 2 file
496 """Write current state on disk in a version 2 file
497
497
498 See the docstring for _readrecordsv2 for why we use 't'."""
498 See the docstring for _readrecordsv2 for why we use 't'."""
499 # these are the records that all version 2 clients can read
499 # these are the records that all version 2 clients can read
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 f = self._repo.vfs(self.statepathv2, b'wb')
501 f = self._repo.vfs(self.statepathv2, b'wb')
502 for key, data in records:
502 for key, data in records:
503 assert len(key) == 1
503 assert len(key) == 1
504 if key not in allowlist:
504 if key not in allowlist:
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 format = b'>sI%is' % len(data)
506 format = b'>sI%is' % len(data)
507 f.write(_pack(format, key, len(data), data))
507 f.write(_pack(format, key, len(data), data))
508 f.close()
508 f.close()
509
509
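The writing side uses the same framing through a single struct format string; a minimal standalone sketch of that packing (hypothetical helper name):

    import struct

    def packrecord(rtype, data):
        """Frame one record the way _writerecordsv2 does ('>sI%is')."""
        assert len(rtype) == 1
        return struct.pack(b'>sI%ds' % len(data), rtype, len(data), data)

    assert packrecord(b'L', b'abc') == b'L\x00\x00\x00\x03abc'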
510 @staticmethod
510 @staticmethod
511 def getlocalkey(path):
511 def getlocalkey(path):
512 """hash the path of a local file context for storage in the .hg/merge
512 """hash the path of a local file context for storage in the .hg/merge
513 directory."""
513 directory."""
514
514
515 return hex(hashlib.sha1(path).digest())
515 return hex(hashutil.sha1(path).digest())
516
516
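This is the substantive change in the hunk: the key for the saved local version is still a SHA-1 of the file path, now computed through the hashutil wrapper instead of hashlib directly. A standalone equivalent using hashlib (hashutil itself is not shown in this diff):

    import hashlib

    def getlocalkey(path):
        # path is a bytes repo-relative filename, e.g. b'foo/bar.txt'
        return hashlib.sha1(path).hexdigest()

    key = getlocalkey(b'foo/bar.txt')   # 40-character hex key used under .hg/merge/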
517 def add(self, fcl, fco, fca, fd):
517 def add(self, fcl, fco, fca, fd):
518 """add a new (potentially?) conflicting file the merge state
518 """add a new (potentially?) conflicting file the merge state
519 fcl: file context for local,
519 fcl: file context for local,
520 fco: file context for remote,
520 fco: file context for remote,
521 fca: file context for ancestors,
521 fca: file context for ancestors,
522 fd: file path of the resulting merge.
522 fd: file path of the resulting merge.
523
523
524 note: also write the local version to the `.hg/merge` directory.
524 note: also write the local version to the `.hg/merge` directory.
525 """
525 """
526 if fcl.isabsent():
526 if fcl.isabsent():
527 localkey = nullhex
527 localkey = nullhex
528 else:
528 else:
529 localkey = mergestate.getlocalkey(fcl.path())
529 localkey = mergestate.getlocalkey(fcl.path())
530 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
530 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
531 self._state[fd] = [
531 self._state[fd] = [
532 MERGE_RECORD_UNRESOLVED,
532 MERGE_RECORD_UNRESOLVED,
533 localkey,
533 localkey,
534 fcl.path(),
534 fcl.path(),
535 fca.path(),
535 fca.path(),
536 hex(fca.filenode()),
536 hex(fca.filenode()),
537 fco.path(),
537 fco.path(),
538 hex(fco.filenode()),
538 hex(fco.filenode()),
539 fcl.flags(),
539 fcl.flags(),
540 ]
540 ]
541 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
541 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
542 self._dirty = True
542 self._dirty = True
543
543
544 def addpath(self, path, frename, forigin):
544 def addpath(self, path, frename, forigin):
545 """add a new conflicting path to the merge state
545 """add a new conflicting path to the merge state
546 path: the path that conflicts
546 path: the path that conflicts
547 frename: the filename the conflicting file was renamed to
547 frename: the filename the conflicting file was renamed to
548 forigin: origin of the file ('l' or 'r' for local/remote)
548 forigin: origin of the file ('l' or 'r' for local/remote)
549 """
549 """
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
551 self._dirty = True
551 self._dirty = True
552
552
553 def __contains__(self, dfile):
553 def __contains__(self, dfile):
554 return dfile in self._state
554 return dfile in self._state
555
555
556 def __getitem__(self, dfile):
556 def __getitem__(self, dfile):
557 return self._state[dfile][0]
557 return self._state[dfile][0]
558
558
559 def __iter__(self):
559 def __iter__(self):
560 return iter(sorted(self._state))
560 return iter(sorted(self._state))
561
561
562 def files(self):
562 def files(self):
563 return self._state.keys()
563 return self._state.keys()
564
564
565 def mark(self, dfile, state):
565 def mark(self, dfile, state):
566 self._state[dfile][0] = state
566 self._state[dfile][0] = state
567 self._dirty = True
567 self._dirty = True
568
568
569 def mdstate(self):
569 def mdstate(self):
570 return self._mdstate
570 return self._mdstate
571
571
572 def unresolved(self):
572 def unresolved(self):
573 """Obtain the paths of unresolved files."""
573 """Obtain the paths of unresolved files."""
574
574
575 for f, entry in pycompat.iteritems(self._state):
575 for f, entry in pycompat.iteritems(self._state):
576 if entry[0] in (
576 if entry[0] in (
577 MERGE_RECORD_UNRESOLVED,
577 MERGE_RECORD_UNRESOLVED,
578 MERGE_RECORD_UNRESOLVED_PATH,
578 MERGE_RECORD_UNRESOLVED_PATH,
579 ):
579 ):
580 yield f
580 yield f
581
581
582 def driverresolved(self):
582 def driverresolved(self):
583 """Obtain the paths of driver-resolved files."""
583 """Obtain the paths of driver-resolved files."""
584
584
585 for f, entry in self._state.items():
585 for f, entry in self._state.items():
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
587 yield f
587 yield f
588
588
589 def extras(self, filename):
589 def extras(self, filename):
590 return self._stateextras.setdefault(filename, {})
590 return self._stateextras.setdefault(filename, {})
591
591
592 def _resolve(self, preresolve, dfile, wctx):
592 def _resolve(self, preresolve, dfile, wctx):
593 """rerun merge process for file path `dfile`"""
593 """rerun merge process for file path `dfile`"""
594 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
594 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
595 return True, 0
595 return True, 0
596 stateentry = self._state[dfile]
596 stateentry = self._state[dfile]
597 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
597 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
598 octx = self._repo[self._other]
598 octx = self._repo[self._other]
599 extras = self.extras(dfile)
599 extras = self.extras(dfile)
600 anccommitnode = extras.get(b'ancestorlinknode')
600 anccommitnode = extras.get(b'ancestorlinknode')
601 if anccommitnode:
601 if anccommitnode:
602 actx = self._repo[anccommitnode]
602 actx = self._repo[anccommitnode]
603 else:
603 else:
604 actx = None
604 actx = None
605 fcd = self._filectxorabsent(localkey, wctx, dfile)
605 fcd = self._filectxorabsent(localkey, wctx, dfile)
606 fco = self._filectxorabsent(onode, octx, ofile)
606 fco = self._filectxorabsent(onode, octx, ofile)
607 # TODO: move this to filectxorabsent
607 # TODO: move this to filectxorabsent
608 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
608 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
609 # "premerge" x flags
609 # "premerge" x flags
610 flo = fco.flags()
610 flo = fco.flags()
611 fla = fca.flags()
611 fla = fca.flags()
612 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
612 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
613 if fca.node() == nullid and flags != flo:
613 if fca.node() == nullid and flags != flo:
614 if preresolve:
614 if preresolve:
615 self._repo.ui.warn(
615 self._repo.ui.warn(
616 _(
616 _(
617 b'warning: cannot merge flags for %s '
617 b'warning: cannot merge flags for %s '
618 b'without common ancestor - keeping local flags\n'
618 b'without common ancestor - keeping local flags\n'
619 )
619 )
620 % afile
620 % afile
621 )
621 )
622 elif flags == fla:
622 elif flags == fla:
623 flags = flo
623 flags = flo
624 if preresolve:
624 if preresolve:
625 # restore local
625 # restore local
626 if localkey != nullhex:
626 if localkey != nullhex:
627 f = self._repo.vfs(b'merge/' + localkey)
627 f = self._repo.vfs(b'merge/' + localkey)
628 wctx[dfile].write(f.read(), flags)
628 wctx[dfile].write(f.read(), flags)
629 f.close()
629 f.close()
630 else:
630 else:
631 wctx[dfile].remove(ignoremissing=True)
631 wctx[dfile].remove(ignoremissing=True)
632 complete, r, deleted = filemerge.premerge(
632 complete, r, deleted = filemerge.premerge(
633 self._repo,
633 self._repo,
634 wctx,
634 wctx,
635 self._local,
635 self._local,
636 lfile,
636 lfile,
637 fcd,
637 fcd,
638 fco,
638 fco,
639 fca,
639 fca,
640 labels=self._labels,
640 labels=self._labels,
641 )
641 )
642 else:
642 else:
643 complete, r, deleted = filemerge.filemerge(
643 complete, r, deleted = filemerge.filemerge(
644 self._repo,
644 self._repo,
645 wctx,
645 wctx,
646 self._local,
646 self._local,
647 lfile,
647 lfile,
648 fcd,
648 fcd,
649 fco,
649 fco,
650 fca,
650 fca,
651 labels=self._labels,
651 labels=self._labels,
652 )
652 )
653 if r is None:
653 if r is None:
654 # no real conflict
654 # no real conflict
655 del self._state[dfile]
655 del self._state[dfile]
656 self._stateextras.pop(dfile, None)
656 self._stateextras.pop(dfile, None)
657 self._dirty = True
657 self._dirty = True
658 elif not r:
658 elif not r:
659 self.mark(dfile, MERGE_RECORD_RESOLVED)
659 self.mark(dfile, MERGE_RECORD_RESOLVED)
660
660
661 if complete:
661 if complete:
662 action = None
662 action = None
663 if deleted:
663 if deleted:
664 if fcd.isabsent():
664 if fcd.isabsent():
665 # dc: local picked. Need to drop if present, which may
665 # dc: local picked. Need to drop if present, which may
666 # happen on re-resolves.
666 # happen on re-resolves.
667 action = ACTION_FORGET
667 action = ACTION_FORGET
668 else:
668 else:
669 # cd: remote picked (or otherwise deleted)
669 # cd: remote picked (or otherwise deleted)
670 action = ACTION_REMOVE
670 action = ACTION_REMOVE
671 else:
671 else:
672 if fcd.isabsent(): # dc: remote picked
672 if fcd.isabsent(): # dc: remote picked
673 action = ACTION_GET
673 action = ACTION_GET
674 elif fco.isabsent(): # cd: local picked
674 elif fco.isabsent(): # cd: local picked
675 if dfile in self.localctx:
675 if dfile in self.localctx:
676 action = ACTION_ADD_MODIFIED
676 action = ACTION_ADD_MODIFIED
677 else:
677 else:
678 action = ACTION_ADD
678 action = ACTION_ADD
679 # else: regular merges (no action necessary)
679 # else: regular merges (no action necessary)
680 self._results[dfile] = r, action
680 self._results[dfile] = r, action
681
681
682 return complete, r
682 return complete, r
683
683
684 def _filectxorabsent(self, hexnode, ctx, f):
684 def _filectxorabsent(self, hexnode, ctx, f):
685 if hexnode == nullhex:
685 if hexnode == nullhex:
686 return filemerge.absentfilectx(ctx, f)
686 return filemerge.absentfilectx(ctx, f)
687 else:
687 else:
688 return ctx[f]
688 return ctx[f]
689
689
690 def preresolve(self, dfile, wctx):
690 def preresolve(self, dfile, wctx):
691 """run premerge process for dfile
691 """run premerge process for dfile
692
692
693 Returns whether the merge is complete, and the exit code."""
693 Returns whether the merge is complete, and the exit code."""
694 return self._resolve(True, dfile, wctx)
694 return self._resolve(True, dfile, wctx)
695
695
696 def resolve(self, dfile, wctx):
696 def resolve(self, dfile, wctx):
697 """run merge process (assuming premerge was run) for dfile
697 """run merge process (assuming premerge was run) for dfile
698
698
699 Returns the exit code of the merge."""
699 Returns the exit code of the merge."""
700 return self._resolve(False, dfile, wctx)[1]
700 return self._resolve(False, dfile, wctx)[1]
701
701
702 def counts(self):
702 def counts(self):
703 """return counts for updated, merged and removed files in this
703 """return counts for updated, merged and removed files in this
704 session"""
704 session"""
705 updated, merged, removed = 0, 0, 0
705 updated, merged, removed = 0, 0, 0
706 for r, action in pycompat.itervalues(self._results):
706 for r, action in pycompat.itervalues(self._results):
707 if r is None:
707 if r is None:
708 updated += 1
708 updated += 1
709 elif r == 0:
709 elif r == 0:
710 if action == ACTION_REMOVE:
710 if action == ACTION_REMOVE:
711 removed += 1
711 removed += 1
712 else:
712 else:
713 merged += 1
713 merged += 1
714 return updated, merged, removed
714 return updated, merged, removed
715
715
716 def unresolvedcount(self):
716 def unresolvedcount(self):
717 """get unresolved count for this merge (persistent)"""
717 """get unresolved count for this merge (persistent)"""
718 return len(list(self.unresolved()))
718 return len(list(self.unresolved()))
719
719
720 def actions(self):
720 def actions(self):
721 """return lists of actions to perform on the dirstate"""
721 """return lists of actions to perform on the dirstate"""
722 actions = {
722 actions = {
723 ACTION_REMOVE: [],
723 ACTION_REMOVE: [],
724 ACTION_FORGET: [],
724 ACTION_FORGET: [],
725 ACTION_ADD: [],
725 ACTION_ADD: [],
726 ACTION_ADD_MODIFIED: [],
726 ACTION_ADD_MODIFIED: [],
727 ACTION_GET: [],
727 ACTION_GET: [],
728 }
728 }
729 for f, (r, action) in pycompat.iteritems(self._results):
729 for f, (r, action) in pycompat.iteritems(self._results):
730 if action is not None:
730 if action is not None:
731 actions[action].append((f, None, b"merge result"))
731 actions[action].append((f, None, b"merge result"))
732 return actions
732 return actions
733
733
734 def recordactions(self):
734 def recordactions(self):
735 """record remove/add/get actions in the dirstate"""
735 """record remove/add/get actions in the dirstate"""
736 branchmerge = self._repo.dirstate.p2() != nullid
736 branchmerge = self._repo.dirstate.p2() != nullid
737 recordupdates(self._repo, self.actions(), branchmerge, None)
737 recordupdates(self._repo, self.actions(), branchmerge, None)
738
738
739 def queueremove(self, f):
739 def queueremove(self, f):
740 """queues a file to be removed from the dirstate
740 """queues a file to be removed from the dirstate
741
741
742 Meant for use by custom merge drivers."""
742 Meant for use by custom merge drivers."""
743 self._results[f] = 0, ACTION_REMOVE
743 self._results[f] = 0, ACTION_REMOVE
744
744
745 def queueadd(self, f):
745 def queueadd(self, f):
746 """queues a file to be added to the dirstate
746 """queues a file to be added to the dirstate
747
747
748 Meant for use by custom merge drivers."""
748 Meant for use by custom merge drivers."""
749 self._results[f] = 0, ACTION_ADD
749 self._results[f] = 0, ACTION_ADD
750
750
751 def queueget(self, f):
751 def queueget(self, f):
752 """queues a file to be marked modified in the dirstate
752 """queues a file to be marked modified in the dirstate
753
753
754 Meant for use by custom merge drivers."""
754 Meant for use by custom merge drivers."""
755 self._results[f] = 0, ACTION_GET
755 self._results[f] = 0, ACTION_GET
756
756
757
757
758 def _getcheckunknownconfig(repo, section, name):
758 def _getcheckunknownconfig(repo, section, name):
759 config = repo.ui.config(section, name)
759 config = repo.ui.config(section, name)
760 valid = [b'abort', b'ignore', b'warn']
760 valid = [b'abort', b'ignore', b'warn']
761 if config not in valid:
761 if config not in valid:
762 validstr = b', '.join([b"'" + v + b"'" for v in valid])
762 validstr = b', '.join([b"'" + v + b"'" for v in valid])
763 raise error.ConfigError(
763 raise error.ConfigError(
764 _(b"%s.%s not valid ('%s' is none of %s)")
764 _(b"%s.%s not valid ('%s' is none of %s)")
765 % (section, name, config, validstr)
765 % (section, name, config, validstr)
766 )
766 )
767 return config
767 return config
768
768
769
769
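The helper above validates the three accepted values; the options it is used for live in the user's hgrc, with names matching the lookups in _checkunknownfiles below (the values here are only an example):

    [merge]
    # behaviour for unknown / ignored files that a merge or update would
    # overwrite: 'abort', 'ignore' or 'warn'
    checkunknown = warn
    checkignored = ignore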
770 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
770 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
771 if wctx.isinmemory():
771 if wctx.isinmemory():
772 # Nothing to do in IMM because nothing in the "working copy" can be an
772 # Nothing to do in IMM because nothing in the "working copy" can be an
773 # unknown file.
773 # unknown file.
774 #
774 #
775 # Note that we should bail out here, not in ``_checkunknownfiles()``,
775 # Note that we should bail out here, not in ``_checkunknownfiles()``,
776 # because that function does other useful work.
776 # because that function does other useful work.
777 return False
777 return False
778
778
779 if f2 is None:
779 if f2 is None:
780 f2 = f
780 f2 = f
781 return (
781 return (
782 repo.wvfs.audit.check(f)
782 repo.wvfs.audit.check(f)
783 and repo.wvfs.isfileorlink(f)
783 and repo.wvfs.isfileorlink(f)
784 and repo.dirstate.normalize(f) not in repo.dirstate
784 and repo.dirstate.normalize(f) not in repo.dirstate
785 and mctx[f2].cmp(wctx[f])
785 and mctx[f2].cmp(wctx[f])
786 )
786 )
787
787
788
788
789 class _unknowndirschecker(object):
789 class _unknowndirschecker(object):
790 """
790 """
791 Look for any unknown files or directories that may have a path conflict
791 Look for any unknown files or directories that may have a path conflict
792 with a file. If any path prefix of the file exists as a file or link,
792 with a file. If any path prefix of the file exists as a file or link,
793 then it conflicts. If the file itself is a directory that contains any
793 then it conflicts. If the file itself is a directory that contains any
794 file that is not tracked, then it conflicts.
794 file that is not tracked, then it conflicts.
795
795
796 Returns the shortest path at which a conflict occurs, or None if there is
796 Returns the shortest path at which a conflict occurs, or None if there is
797 no conflict.
797 no conflict.
798 """
798 """
799
799
800 def __init__(self):
800 def __init__(self):
801 # A set of paths known to be good. This prevents repeated checking of
801 # A set of paths known to be good. This prevents repeated checking of
802 # dirs. It will be updated with any new dirs that are checked and found
802 # dirs. It will be updated with any new dirs that are checked and found
803 # to be safe.
803 # to be safe.
804 self._unknowndircache = set()
804 self._unknowndircache = set()
805
805
806 # A set of paths that are known to be absent. This prevents repeated
806 # A set of paths that are known to be absent. This prevents repeated
807 # checking of subdirectories that are known not to exist. It will be
807 # checking of subdirectories that are known not to exist. It will be
808 # updated with any new dirs that are checked and found to be absent.
808 # updated with any new dirs that are checked and found to be absent.
809 self._missingdircache = set()
809 self._missingdircache = set()
810
810
811 def __call__(self, repo, wctx, f):
811 def __call__(self, repo, wctx, f):
812 if wctx.isinmemory():
812 if wctx.isinmemory():
813 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
813 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
814 return False
814 return False
815
815
816 # Check for path prefixes that exist as unknown files.
816 # Check for path prefixes that exist as unknown files.
817 for p in reversed(list(pathutil.finddirs(f))):
817 for p in reversed(list(pathutil.finddirs(f))):
818 if p in self._missingdircache:
818 if p in self._missingdircache:
819 return
819 return
820 if p in self._unknowndircache:
820 if p in self._unknowndircache:
821 continue
821 continue
822 if repo.wvfs.audit.check(p):
822 if repo.wvfs.audit.check(p):
823 if (
823 if (
824 repo.wvfs.isfileorlink(p)
824 repo.wvfs.isfileorlink(p)
825 and repo.dirstate.normalize(p) not in repo.dirstate
825 and repo.dirstate.normalize(p) not in repo.dirstate
826 ):
826 ):
827 return p
827 return p
828 if not repo.wvfs.lexists(p):
828 if not repo.wvfs.lexists(p):
829 self._missingdircache.add(p)
829 self._missingdircache.add(p)
830 return
830 return
831 self._unknowndircache.add(p)
831 self._unknowndircache.add(p)
832
832
833 # Check if the file conflicts with a directory containing unknown files.
833 # Check if the file conflicts with a directory containing unknown files.
834 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
834 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
835 # Does the directory contain any files that are not in the dirstate?
835 # Does the directory contain any files that are not in the dirstate?
836 for p, dirs, files in repo.wvfs.walk(f):
836 for p, dirs, files in repo.wvfs.walk(f):
837 for fn in files:
837 for fn in files:
838 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
838 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
839 relf = repo.dirstate.normalize(relf, isknown=True)
839 relf = repo.dirstate.normalize(relf, isknown=True)
840 if relf not in repo.dirstate:
840 if relf not in repo.dirstate:
841 return f
841 return f
842 return None
842 return None
843
843
844
844
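Outside of Mercurial's vfs and dirstate machinery, the same two checks can be approximated with plain filesystem calls. A self-contained sketch (illustrative names; the real checker also honours the path auditor and dirstate normalization):

    import os

    def findpathconflict(root, f, tracked):
        """Return the shortest path conflicting with `f`, or None.

        `root` is the working directory, `f` a '/'-separated repo-relative
        path, and `tracked` a set of repo-relative tracked paths.
        """
        parts = f.split('/')
        # a path prefix that exists as an untracked file or symlink conflicts
        for i in range(1, len(parts)):
            prefix = '/'.join(parts[:i])
            p = os.path.join(root, *parts[:i])
            if (os.path.isfile(p) or os.path.islink(p)) and prefix not in tracked:
                return prefix
        # a directory at `f` containing any untracked file conflicts
        full = os.path.join(root, *parts)
        if os.path.isdir(full):
            for dirpath, dirs, files in os.walk(full):
                for fn in files:
                    rel = os.path.relpath(os.path.join(dirpath, fn), root)
                    if rel.replace(os.sep, '/') not in tracked:
                        return f
        return None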
845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
846 """
846 """
847 Considers any actions that care about the presence of conflicting unknown
847 Considers any actions that care about the presence of conflicting unknown
848 files. For some actions, the result is to abort; for others, it is to
848 files. For some actions, the result is to abort; for others, it is to
849 choose a different action.
849 choose a different action.
850 """
850 """
851 fileconflicts = set()
851 fileconflicts = set()
852 pathconflicts = set()
852 pathconflicts = set()
853 warnconflicts = set()
853 warnconflicts = set()
854 abortconflicts = set()
854 abortconflicts = set()
855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
857 pathconfig = repo.ui.configbool(
857 pathconfig = repo.ui.configbool(
858 b'experimental', b'merge.checkpathconflicts'
858 b'experimental', b'merge.checkpathconflicts'
859 )
859 )
860 if not force:
860 if not force:
861
861
862 def collectconflicts(conflicts, config):
862 def collectconflicts(conflicts, config):
863 if config == b'abort':
863 if config == b'abort':
864 abortconflicts.update(conflicts)
864 abortconflicts.update(conflicts)
865 elif config == b'warn':
865 elif config == b'warn':
866 warnconflicts.update(conflicts)
866 warnconflicts.update(conflicts)
867
867
868 checkunknowndirs = _unknowndirschecker()
868 checkunknowndirs = _unknowndirschecker()
869 for f, (m, args, msg) in pycompat.iteritems(actions):
869 for f, (m, args, msg) in pycompat.iteritems(actions):
870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
871 if _checkunknownfile(repo, wctx, mctx, f):
871 if _checkunknownfile(repo, wctx, mctx, f):
872 fileconflicts.add(f)
872 fileconflicts.add(f)
873 elif pathconfig and f not in wctx:
873 elif pathconfig and f not in wctx:
874 path = checkunknowndirs(repo, wctx, f)
874 path = checkunknowndirs(repo, wctx, f)
875 if path is not None:
875 if path is not None:
876 pathconflicts.add(path)
876 pathconflicts.add(path)
877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
879 fileconflicts.add(f)
879 fileconflicts.add(f)
880
880
881 allconflicts = fileconflicts | pathconflicts
881 allconflicts = fileconflicts | pathconflicts
882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
883 unknownconflicts = allconflicts - ignoredconflicts
883 unknownconflicts = allconflicts - ignoredconflicts
884 collectconflicts(ignoredconflicts, ignoredconfig)
884 collectconflicts(ignoredconflicts, ignoredconfig)
885 collectconflicts(unknownconflicts, unknownconfig)
885 collectconflicts(unknownconflicts, unknownconfig)
886 else:
886 else:
887 for f, (m, args, msg) in pycompat.iteritems(actions):
887 for f, (m, args, msg) in pycompat.iteritems(actions):
888 if m == ACTION_CREATED_MERGE:
888 if m == ACTION_CREATED_MERGE:
889 fl2, anc = args
889 fl2, anc = args
890 different = _checkunknownfile(repo, wctx, mctx, f)
890 different = _checkunknownfile(repo, wctx, mctx, f)
891 if repo.dirstate._ignore(f):
891 if repo.dirstate._ignore(f):
892 config = ignoredconfig
892 config = ignoredconfig
893 else:
893 else:
894 config = unknownconfig
894 config = unknownconfig
895
895
896 # The behavior when force is True is described by this table:
896 # The behavior when force is True is described by this table:
897 # config different mergeforce | action backup
897 # config different mergeforce | action backup
898 # * n * | get n
898 # * n * | get n
899 # * y y | merge -
899 # * y y | merge -
900 # abort y n | merge - (1)
900 # abort y n | merge - (1)
901 # warn y n | warn + get y
901 # warn y n | warn + get y
902 # ignore y n | get y
902 # ignore y n | get y
903 #
903 #
904 # (1) this is probably the wrong behavior here -- we should
904 # (1) this is probably the wrong behavior here -- we should
905 # probably abort, but some actions like rebases currently
905 # probably abort, but some actions like rebases currently
906 # don't like an abort happening in the middle of
906 # don't like an abort happening in the middle of
907 # merge.update.
907 # merge.update.
908 if not different:
908 if not different:
909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
910 elif mergeforce or config == b'abort':
910 elif mergeforce or config == b'abort':
911 actions[f] = (
911 actions[f] = (
912 ACTION_MERGE,
912 ACTION_MERGE,
913 (f, f, None, False, anc),
913 (f, f, None, False, anc),
914 b'remote differs from untracked local',
914 b'remote differs from untracked local',
915 )
915 )
916 elif config == b'abort':
916 elif config == b'abort':
917 abortconflicts.add(f)
917 abortconflicts.add(f)
918 else:
918 else:
919 if config == b'warn':
919 if config == b'warn':
920 warnconflicts.add(f)
920 warnconflicts.add(f)
921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
922
922
923 for f in sorted(abortconflicts):
923 for f in sorted(abortconflicts):
924 warn = repo.ui.warn
924 warn = repo.ui.warn
925 if f in pathconflicts:
925 if f in pathconflicts:
926 if repo.wvfs.isfileorlink(f):
926 if repo.wvfs.isfileorlink(f):
927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
928 else:
928 else:
929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
930 else:
930 else:
931 warn(_(b"%s: untracked file differs\n") % f)
931 warn(_(b"%s: untracked file differs\n") % f)
932 if abortconflicts:
932 if abortconflicts:
933 raise error.Abort(
933 raise error.Abort(
934 _(
934 _(
935 b"untracked files in working directory "
935 b"untracked files in working directory "
936 b"differ from files in requested revision"
936 b"differ from files in requested revision"
937 )
937 )
938 )
938 )
939
939
940 for f in sorted(warnconflicts):
940 for f in sorted(warnconflicts):
941 if repo.wvfs.isfileorlink(f):
941 if repo.wvfs.isfileorlink(f):
942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
943 else:
943 else:
944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
945
945
946 for f, (m, args, msg) in pycompat.iteritems(actions):
946 for f, (m, args, msg) in pycompat.iteritems(actions):
947 if m == ACTION_CREATED:
947 if m == ACTION_CREATED:
948 backup = (
948 backup = (
949 f in fileconflicts
949 f in fileconflicts
950 or f in pathconflicts
950 or f in pathconflicts
951 or any(p in pathconflicts for p in pathutil.finddirs(f))
951 or any(p in pathconflicts for p in pathutil.finddirs(f))
952 )
952 )
953 (flags,) = args
953 (flags,) = args
954 actions[f] = (ACTION_GET, (flags, backup), msg)
954 actions[f] = (ACTION_GET, (flags, backup), msg)
955
955
956
956
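As a rough sketch of how the checkunknown/checkignored settings partition the conflicts collected above (assuming only the three documented values 'abort', 'warn' and 'ignore'), the dispatch looks like this; `ignored` stands in for `repo.dirstate._ignore`:

def partition_conflicts(conflicts, ignored, unknownconfig, ignoredconfig):
    """Split conflicting paths into (abort, warn) sets per config policy."""
    abort, warn = set(), set()
    buckets = {'abort': abort, 'warn': warn, 'ignore': set()}
    for f in conflicts:
        config = ignoredconfig if ignored(f) else unknownconfig
        buckets[config].add(f)
    return abort, warn

# partition_conflicts({'a', 'b'}, lambda f: f == 'a', 'warn', 'abort')
# -> ({'a'}, {'b'})    # ignored 'a' aborts, unknown 'b' only warns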
957 def _forgetremoved(wctx, mctx, branchmerge):
957 def _forgetremoved(wctx, mctx, branchmerge):
958 """
958 """
959 Forget removed files
959 Forget removed files
960
960
961 If we're jumping between revisions (as opposed to merging), and if
961 If we're jumping between revisions (as opposed to merging), and if
962 neither the working directory nor the target rev has the file,
962 neither the working directory nor the target rev has the file,
963 then we need to remove it from the dirstate, to prevent the
963 then we need to remove it from the dirstate, to prevent the
964 dirstate from listing the file when it is no longer in the
964 dirstate from listing the file when it is no longer in the
965 manifest.
965 manifest.
966
966
967 If we're merging, and the other revision has removed a file
967 If we're merging, and the other revision has removed a file
968 that is not present in the working directory, we need to mark it
968 that is not present in the working directory, we need to mark it
969 as removed.
969 as removed.
970 """
970 """
971
971
972 actions = {}
972 actions = {}
973 m = ACTION_FORGET
973 m = ACTION_FORGET
974 if branchmerge:
974 if branchmerge:
975 m = ACTION_REMOVE
975 m = ACTION_REMOVE
976 for f in wctx.deleted():
976 for f in wctx.deleted():
977 if f not in mctx:
977 if f not in mctx:
978 actions[f] = m, None, b"forget deleted"
978 actions[f] = m, None, b"forget deleted"
979
979
980 if not branchmerge:
980 if not branchmerge:
981 for f in wctx.removed():
981 for f in wctx.removed():
982 if f not in mctx:
982 if f not in mctx:
983 actions[f] = ACTION_FORGET, None, b"forget removed"
983 actions[f] = ACTION_FORGET, None, b"forget removed"
984
984
985 return actions
985 return actions
986
986
987
987
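In plain terms, the function above marks deleted files for removal when branch-merging and for forgetting when just updating; a compact stand-alone restatement, with 'f'/'r' standing for ACTION_FORGET/ACTION_REMOVE and `in_target` a hypothetical membership test for mctx:

def forget_or_remove(deleted, removed, in_target, branchmerge):
    """Map each missing file to 'f' (forget) or 'r' (remove), per _forgetremoved."""
    actions = {}
    mark = 'r' if branchmerge else 'f'
    for f in deleted:
        if not in_target(f):
            actions[f] = mark
    if not branchmerge:
        for f in removed:
            if not in_target(f):
                actions[f] = 'f'
    return actions

# forget_or_remove(['gone.txt'], [], lambda f: False, branchmerge=False)
# -> {'gone.txt': 'f'}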
988 def _checkcollision(repo, wmf, actions):
988 def _checkcollision(repo, wmf, actions):
989 """
989 """
990 Check for case-folding collisions.
990 Check for case-folding collisions.
991 """
991 """
992
992
993 # If the repo is narrowed, filter out files outside the narrowspec.
993 # If the repo is narrowed, filter out files outside the narrowspec.
994 narrowmatch = repo.narrowmatch()
994 narrowmatch = repo.narrowmatch()
995 if not narrowmatch.always():
995 if not narrowmatch.always():
996 wmf = wmf.matches(narrowmatch)
996 wmf = wmf.matches(narrowmatch)
997 if actions:
997 if actions:
998 narrowactions = {}
998 narrowactions = {}
999 for m, actionsfortype in pycompat.iteritems(actions):
999 for m, actionsfortype in pycompat.iteritems(actions):
1000 narrowactions[m] = []
1000 narrowactions[m] = []
1001 for (f, args, msg) in actionsfortype:
1001 for (f, args, msg) in actionsfortype:
1002 if narrowmatch(f):
1002 if narrowmatch(f):
1003 narrowactions[m].append((f, args, msg))
1003 narrowactions[m].append((f, args, msg))
1004 actions = narrowactions
1004 actions = narrowactions
1005
1005
1006 # build provisional merged manifest up
1006 # build provisional merged manifest up
1007 pmmf = set(wmf)
1007 pmmf = set(wmf)
1008
1008
1009 if actions:
1009 if actions:
1010 # KEEP and EXEC are no-op
1010 # KEEP and EXEC are no-op
1011 for m in (
1011 for m in (
1012 ACTION_ADD,
1012 ACTION_ADD,
1013 ACTION_ADD_MODIFIED,
1013 ACTION_ADD_MODIFIED,
1014 ACTION_FORGET,
1014 ACTION_FORGET,
1015 ACTION_GET,
1015 ACTION_GET,
1016 ACTION_CHANGED_DELETED,
1016 ACTION_CHANGED_DELETED,
1017 ACTION_DELETED_CHANGED,
1017 ACTION_DELETED_CHANGED,
1018 ):
1018 ):
1019 for f, args, msg in actions[m]:
1019 for f, args, msg in actions[m]:
1020 pmmf.add(f)
1020 pmmf.add(f)
1021 for f, args, msg in actions[ACTION_REMOVE]:
1021 for f, args, msg in actions[ACTION_REMOVE]:
1022 pmmf.discard(f)
1022 pmmf.discard(f)
1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1024 f2, flags = args
1024 f2, flags = args
1025 pmmf.discard(f2)
1025 pmmf.discard(f2)
1026 pmmf.add(f)
1026 pmmf.add(f)
1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1028 pmmf.add(f)
1028 pmmf.add(f)
1029 for f, args, msg in actions[ACTION_MERGE]:
1029 for f, args, msg in actions[ACTION_MERGE]:
1030 f1, f2, fa, move, anc = args
1030 f1, f2, fa, move, anc = args
1031 if move:
1031 if move:
1032 pmmf.discard(f1)
1032 pmmf.discard(f1)
1033 pmmf.add(f)
1033 pmmf.add(f)
1034
1034
1035 # check case-folding collision in provisional merged manifest
1035 # check case-folding collision in provisional merged manifest
1036 foldmap = {}
1036 foldmap = {}
1037 for f in pmmf:
1037 for f in pmmf:
1038 fold = util.normcase(f)
1038 fold = util.normcase(f)
1039 if fold in foldmap:
1039 if fold in foldmap:
1040 raise error.Abort(
1040 raise error.Abort(
1041 _(b"case-folding collision between %s and %s")
1041 _(b"case-folding collision between %s and %s")
1042 % (f, foldmap[fold])
1042 % (f, foldmap[fold])
1043 )
1043 )
1044 foldmap[fold] = f
1044 foldmap[fold] = f
1045
1045
1046 # check case-folding of directories
1046 # check case-folding of directories
1047 foldprefix = unfoldprefix = lastfull = b''
1047 foldprefix = unfoldprefix = lastfull = b''
1048 for fold, f in sorted(foldmap.items()):
1048 for fold, f in sorted(foldmap.items()):
1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1050 # the folded prefix matches but actual casing is different
1050 # the folded prefix matches but actual casing is different
1051 raise error.Abort(
1051 raise error.Abort(
1052 _(b"case-folding collision between %s and directory of %s")
1052 _(b"case-folding collision between %s and directory of %s")
1053 % (lastfull, f)
1053 % (lastfull, f)
1054 )
1054 )
1055 foldprefix = fold + b'/'
1055 foldprefix = fold + b'/'
1056 unfoldprefix = f + b'/'
1056 unfoldprefix = f + b'/'
1057 lastfull = f
1057 lastfull = f
1058
1058
1059
1059
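The foldmap check above reduces to a simple first-wins scan over normalized names; here is a self-contained sketch using str.lower() as a stand-in for util.normcase():

def check_casefold_collisions(paths):
    """Raise ValueError on a case-insensitive filename collision."""
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()
        if fold in foldmap:
            raise ValueError(
                'case-folding collision between %s and %s' % (f, foldmap[fold])
            )
        foldmap[fold] = f
    return foldmap

# check_casefold_collisions(['README', 'docs/a'])  # ok
# check_casefold_collisions(['README', 'readme'])  # ValueError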
1060 def driverpreprocess(repo, ms, wctx, labels=None):
1060 def driverpreprocess(repo, ms, wctx, labels=None):
1061 """run the preprocess step of the merge driver, if any
1061 """run the preprocess step of the merge driver, if any
1062
1062
1063 This is currently not implemented -- it's an extension point."""
1063 This is currently not implemented -- it's an extension point."""
1064 return True
1064 return True
1065
1065
1066
1066
1067 def driverconclude(repo, ms, wctx, labels=None):
1067 def driverconclude(repo, ms, wctx, labels=None):
1068 """run the conclude step of the merge driver, if any
1068 """run the conclude step of the merge driver, if any
1069
1069
1070 This is currently not implemented -- it's an extension point."""
1070 This is currently not implemented -- it's an extension point."""
1071 return True
1071 return True
1072
1072
1073
1073
1074 def _filesindirs(repo, manifest, dirs):
1074 def _filesindirs(repo, manifest, dirs):
1075 """
1075 """
1076 Generator that yields pairs of all the files in the manifest that are found
1076 Generator that yields pairs of all the files in the manifest that are found
1077 inside the directories listed in dirs, and which directory they are found
1077 inside the directories listed in dirs, and which directory they are found
1078 in.
1078 in.
1079 """
1079 """
1080 for f in manifest:
1080 for f in manifest:
1081 for p in pathutil.finddirs(f):
1081 for p in pathutil.finddirs(f):
1082 if p in dirs:
1082 if p in dirs:
1083 yield f, p
1083 yield f, p
1084 break
1084 break
1085
1085
1086
1086
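A plain-Python stand-in for the generator above, using string prefix checks in place of pathutil.finddirs(), may help show the shape of its output:

def files_in_dirs(manifest, dirs):
    """Yield (file, dir) for each manifest file lying under one of dirs."""
    for f in manifest:
        for d in dirs:
            if f.startswith(d + '/'):
                yield f, d
                break

# list(files_in_dirs(['a/b', 'c/d', 'e'], {'a', 'c'}))
# -> [('a/b', 'a'), ('c/d', 'c')]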
1087 def checkpathconflicts(repo, wctx, mctx, actions):
1087 def checkpathconflicts(repo, wctx, mctx, actions):
1088 """
1088 """
1089 Check if any actions introduce path conflicts in the repository, updating
1089 Check if any actions introduce path conflicts in the repository, updating
1090 actions to record or handle the path conflict accordingly.
1090 actions to record or handle the path conflict accordingly.
1091 """
1091 """
1092 mf = wctx.manifest()
1092 mf = wctx.manifest()
1093
1093
1094 # The set of local files that conflict with a remote directory.
1094 # The set of local files that conflict with a remote directory.
1095 localconflicts = set()
1095 localconflicts = set()
1096
1096
1097 # The set of directories that conflict with a remote file, and so may cause
1097 # The set of directories that conflict with a remote file, and so may cause
1098 # conflicts if they still contain any files after the merge.
1098 # conflicts if they still contain any files after the merge.
1099 remoteconflicts = set()
1099 remoteconflicts = set()
1100
1100
1101 # The set of directories that appear as both a file and a directory in the
1101 # The set of directories that appear as both a file and a directory in the
1102 # remote manifest. These indicate an invalid remote manifest, which
1102 # remote manifest. These indicate an invalid remote manifest, which
1103 # can't be updated to cleanly.
1103 # can't be updated to cleanly.
1104 invalidconflicts = set()
1104 invalidconflicts = set()
1105
1105
1106 # The set of directories that contain files that are being created.
1106 # The set of directories that contain files that are being created.
1107 createdfiledirs = set()
1107 createdfiledirs = set()
1108
1108
1109 # The set of files deleted by all the actions.
1109 # The set of files deleted by all the actions.
1110 deletedfiles = set()
1110 deletedfiles = set()
1111
1111
1112 for f, (m, args, msg) in actions.items():
1112 for f, (m, args, msg) in actions.items():
1113 if m in (
1113 if m in (
1114 ACTION_CREATED,
1114 ACTION_CREATED,
1115 ACTION_DELETED_CHANGED,
1115 ACTION_DELETED_CHANGED,
1116 ACTION_MERGE,
1116 ACTION_MERGE,
1117 ACTION_CREATED_MERGE,
1117 ACTION_CREATED_MERGE,
1118 ):
1118 ):
1119 # This action may create a new local file.
1119 # This action may create a new local file.
1120 createdfiledirs.update(pathutil.finddirs(f))
1120 createdfiledirs.update(pathutil.finddirs(f))
1121 if mf.hasdir(f):
1121 if mf.hasdir(f):
1122 # The file aliases a local directory. This might be ok if all
1122 # The file aliases a local directory. This might be ok if all
1123 # the files in the local directory are being deleted. This
1123 # the files in the local directory are being deleted. This
1124 # will be checked once we know what all the deleted files are.
1124 # will be checked once we know what all the deleted files are.
1125 remoteconflicts.add(f)
1125 remoteconflicts.add(f)
1126 # Track the names of all deleted files.
1126 # Track the names of all deleted files.
1127 if m == ACTION_REMOVE:
1127 if m == ACTION_REMOVE:
1128 deletedfiles.add(f)
1128 deletedfiles.add(f)
1129 if m == ACTION_MERGE:
1129 if m == ACTION_MERGE:
1130 f1, f2, fa, move, anc = args
1130 f1, f2, fa, move, anc = args
1131 if move:
1131 if move:
1132 deletedfiles.add(f1)
1132 deletedfiles.add(f1)
1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1134 f2, flags = args
1134 f2, flags = args
1135 deletedfiles.add(f2)
1135 deletedfiles.add(f2)
1136
1136
1137 # Check all directories that contain created files for path conflicts.
1137 # Check all directories that contain created files for path conflicts.
1138 for p in createdfiledirs:
1138 for p in createdfiledirs:
1139 if p in mf:
1139 if p in mf:
1140 if p in mctx:
1140 if p in mctx:
1141 # A file is in a directory which aliases both a local
1141 # A file is in a directory which aliases both a local
1142 # and a remote file. This is an internal inconsistency
1142 # and a remote file. This is an internal inconsistency
1143 # within the remote manifest.
1143 # within the remote manifest.
1144 invalidconflicts.add(p)
1144 invalidconflicts.add(p)
1145 else:
1145 else:
1146 # A file is in a directory which aliases a local file.
1146 # A file is in a directory which aliases a local file.
1147 # We will need to rename the local file.
1147 # We will need to rename the local file.
1148 localconflicts.add(p)
1148 localconflicts.add(p)
1149 if p in actions and actions[p][0] in (
1149 if p in actions and actions[p][0] in (
1150 ACTION_CREATED,
1150 ACTION_CREATED,
1151 ACTION_DELETED_CHANGED,
1151 ACTION_DELETED_CHANGED,
1152 ACTION_MERGE,
1152 ACTION_MERGE,
1153 ACTION_CREATED_MERGE,
1153 ACTION_CREATED_MERGE,
1154 ):
1154 ):
1155 # The file is in a directory which aliases a remote file.
1155 # The file is in a directory which aliases a remote file.
1156 # This is an internal inconsistency within the remote
1156 # This is an internal inconsistency within the remote
1157 # manifest.
1157 # manifest.
1158 invalidconflicts.add(p)
1158 invalidconflicts.add(p)
1159
1159
1160 # Rename all local conflicting files that have not been deleted.
1160 # Rename all local conflicting files that have not been deleted.
1161 for p in localconflicts:
1161 for p in localconflicts:
1162 if p not in deletedfiles:
1162 if p not in deletedfiles:
1163 ctxname = bytes(wctx).rstrip(b'+')
1163 ctxname = bytes(wctx).rstrip(b'+')
1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1165 actions[pnew] = (
1165 actions[pnew] = (
1166 ACTION_PATH_CONFLICT_RESOLVE,
1166 ACTION_PATH_CONFLICT_RESOLVE,
1167 (p,),
1167 (p,),
1168 b'local path conflict',
1168 b'local path conflict',
1169 )
1169 )
1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1171
1171
1172 if remoteconflicts:
1172 if remoteconflicts:
1173 # Check if all files in the conflicting directories have been removed.
1173 # Check if all files in the conflicting directories have been removed.
1174 ctxname = bytes(mctx).rstrip(b'+')
1174 ctxname = bytes(mctx).rstrip(b'+')
1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1176 if f not in deletedfiles:
1176 if f not in deletedfiles:
1177 m, args, msg = actions[p]
1177 m, args, msg = actions[p]
1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1180 # Action was merge, just update target.
1180 # Action was merge, just update target.
1181 actions[pnew] = (m, args, msg)
1181 actions[pnew] = (m, args, msg)
1182 else:
1182 else:
1183 # Action was create, change to renamed get action.
1183 # Action was create, change to renamed get action.
1184 fl = args[0]
1184 fl = args[0]
1185 actions[pnew] = (
1185 actions[pnew] = (
1186 ACTION_LOCAL_DIR_RENAME_GET,
1186 ACTION_LOCAL_DIR_RENAME_GET,
1187 (p, fl),
1187 (p, fl),
1188 b'remote path conflict',
1188 b'remote path conflict',
1189 )
1189 )
1190 actions[p] = (
1190 actions[p] = (
1191 ACTION_PATH_CONFLICT,
1191 ACTION_PATH_CONFLICT,
1192 (pnew, ACTION_REMOVE),
1192 (pnew, ACTION_REMOVE),
1193 b'path conflict',
1193 b'path conflict',
1194 )
1194 )
1195 remoteconflicts.remove(p)
1195 remoteconflicts.remove(p)
1196 break
1196 break
1197
1197
1198 if invalidconflicts:
1198 if invalidconflicts:
1199 for p in invalidconflicts:
1199 for p in invalidconflicts:
1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1202
1202
1203
1203
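The local-conflict half of the logic above (a newly created file whose parent directory is currently a tracked local file) can be sketched stand-alone; `parents` below is a hypothetical helper playing the role of pathutil.finddirs():

def local_path_conflicts(created, local_files, deleted):
    """Return local files that must be renamed because a new file needs their
    path as a directory (cf. the localconflicts set above)."""

    def parents(path):
        parts = path.split('/')[:-1]
        return {'/'.join(parts[: i + 1]) for i in range(len(parts))}

    createddirs = set()
    for f in created:
        createddirs |= parents(f)
    return {p for p in createddirs if p in local_files and p not in deleted}

# local_path_conflicts({'a/b'}, {'a', 'x'}, set())  -> {'a'}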
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1205 """
1205 """
 1206 Filters out actions that can be ignored because the repo is narrowed.
 1206 Filters out actions that can be ignored because the repo is narrowed.
1207
1207
1208 Raise an exception if the merge cannot be completed because the repo is
1208 Raise an exception if the merge cannot be completed because the repo is
1209 narrowed.
1209 narrowed.
1210 """
1210 """
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1213 # We mutate the items in the dict during iteration, so iterate
1213 # We mutate the items in the dict during iteration, so iterate
1214 # over a copy.
1214 # over a copy.
1215 for f, action in list(actions.items()):
1215 for f, action in list(actions.items()):
1216 if narrowmatch(f):
1216 if narrowmatch(f):
1217 pass
1217 pass
1218 elif not branchmerge:
1218 elif not branchmerge:
1219 del actions[f] # just updating, ignore changes outside clone
1219 del actions[f] # just updating, ignore changes outside clone
1220 elif action[0] in nooptypes:
1220 elif action[0] in nooptypes:
1221 del actions[f] # merge does not affect file
1221 del actions[f] # merge does not affect file
1222 elif action[0] in nonconflicttypes:
1222 elif action[0] in nonconflicttypes:
1223 raise error.Abort(
1223 raise error.Abort(
1224 _(
1224 _(
1225 b'merge affects file \'%s\' outside narrow, '
1225 b'merge affects file \'%s\' outside narrow, '
1226 b'which is not yet supported'
1226 b'which is not yet supported'
1227 )
1227 )
1228 % f,
1228 % f,
1229 hint=_(b'merging in the other direction may work'),
1229 hint=_(b'merging in the other direction may work'),
1230 )
1230 )
1231 else:
1231 else:
1232 raise error.Abort(
1232 raise error.Abort(
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1234 )
1234 )
1235
1235
1236
1236
1237 def manifestmerge(
1237 def manifestmerge(
1238 repo,
1238 repo,
1239 wctx,
1239 wctx,
1240 p2,
1240 p2,
1241 pa,
1241 pa,
1242 branchmerge,
1242 branchmerge,
1243 force,
1243 force,
1244 matcher,
1244 matcher,
1245 acceptremote,
1245 acceptremote,
1246 followcopies,
1246 followcopies,
1247 forcefulldiff=False,
1247 forcefulldiff=False,
1248 ):
1248 ):
1249 """
1249 """
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1250 Merge wctx and p2 with ancestor pa and generate merge action list
1251
1251
1252 branchmerge and force are as passed in to update
1252 branchmerge and force are as passed in to update
1253 matcher = matcher to filter file lists
1253 matcher = matcher to filter file lists
1254 acceptremote = accept the incoming changes without prompting
1254 acceptremote = accept the incoming changes without prompting
1255 """
1255 """
1256 if matcher is not None and matcher.always():
1256 if matcher is not None and matcher.always():
1257 matcher = None
1257 matcher = None
1258
1258
1259 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1259 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1260
1260
1261 # manifests fetched in order are going to be faster, so prime the caches
1261 # manifests fetched in order are going to be faster, so prime the caches
1262 [
1262 [
1263 x.manifest()
1263 x.manifest()
1264 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1264 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1265 ]
1265 ]
1266
1266
1267 if followcopies:
1267 if followcopies:
1268 ret = copies.mergecopies(repo, wctx, p2, pa)
1268 ret = copies.mergecopies(repo, wctx, p2, pa)
1269 copy, movewithdir, diverge, renamedelete, dirmove = ret
1269 copy, movewithdir, diverge, renamedelete, dirmove = ret
1270
1270
1271 boolbm = pycompat.bytestr(bool(branchmerge))
1271 boolbm = pycompat.bytestr(bool(branchmerge))
1272 boolf = pycompat.bytestr(bool(force))
1272 boolf = pycompat.bytestr(bool(force))
1273 boolm = pycompat.bytestr(bool(matcher))
1273 boolm = pycompat.bytestr(bool(matcher))
1274 repo.ui.note(_(b"resolving manifests\n"))
1274 repo.ui.note(_(b"resolving manifests\n"))
1275 repo.ui.debug(
1275 repo.ui.debug(
1276 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1276 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1277 )
1277 )
1278 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1278 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1279
1279
1280 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1280 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1281 copied = set(copy.values())
1281 copied = set(copy.values())
1282 copied.update(movewithdir.values())
1282 copied.update(movewithdir.values())
1283
1283
1284 if b'.hgsubstate' in m1 and wctx.rev() is None:
1284 if b'.hgsubstate' in m1 and wctx.rev() is None:
1285 # Check whether sub state is modified, and overwrite the manifest
1285 # Check whether sub state is modified, and overwrite the manifest
1286 # to flag the change. If wctx is a committed revision, we shouldn't
1286 # to flag the change. If wctx is a committed revision, we shouldn't
1287 # care for the dirty state of the working directory.
1287 # care for the dirty state of the working directory.
1288 if any(wctx.sub(s).dirty() for s in wctx.substate):
1288 if any(wctx.sub(s).dirty() for s in wctx.substate):
1289 m1[b'.hgsubstate'] = modifiednodeid
1289 m1[b'.hgsubstate'] = modifiednodeid
1290
1290
1291 # Don't use m2-vs-ma optimization if:
1291 # Don't use m2-vs-ma optimization if:
1292 # - ma is the same as m1 or m2, which we're just going to diff again later
1292 # - ma is the same as m1 or m2, which we're just going to diff again later
1293 # - The caller specifically asks for a full diff, which is useful during bid
1293 # - The caller specifically asks for a full diff, which is useful during bid
1294 # merge.
1294 # merge.
1295 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1295 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1296 # Identify which files are relevant to the merge, so we can limit the
1296 # Identify which files are relevant to the merge, so we can limit the
1297 # total m1-vs-m2 diff to just those files. This has significant
1297 # total m1-vs-m2 diff to just those files. This has significant
1298 # performance benefits in large repositories.
1298 # performance benefits in large repositories.
1299 relevantfiles = set(ma.diff(m2).keys())
1299 relevantfiles = set(ma.diff(m2).keys())
1300
1300
1301 # For copied and moved files, we need to add the source file too.
1301 # For copied and moved files, we need to add the source file too.
1302 for copykey, copyvalue in pycompat.iteritems(copy):
1302 for copykey, copyvalue in pycompat.iteritems(copy):
1303 if copyvalue in relevantfiles:
1303 if copyvalue in relevantfiles:
1304 relevantfiles.add(copykey)
1304 relevantfiles.add(copykey)
1305 for movedirkey in movewithdir:
1305 for movedirkey in movewithdir:
1306 relevantfiles.add(movedirkey)
1306 relevantfiles.add(movedirkey)
1307 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1307 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1308 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1308 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1309
1309
1310 diff = m1.diff(m2, match=matcher)
1310 diff = m1.diff(m2, match=matcher)
1311
1311
1312 actions = {}
1312 actions = {}
1313 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1313 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1314 if n1 and n2: # file exists on both local and remote side
1314 if n1 and n2: # file exists on both local and remote side
1315 if f not in ma:
1315 if f not in ma:
1316 fa = copy.get(f, None)
1316 fa = copy.get(f, None)
1317 if fa is not None:
1317 if fa is not None:
1318 actions[f] = (
1318 actions[f] = (
1319 ACTION_MERGE,
1319 ACTION_MERGE,
1320 (f, f, fa, False, pa.node()),
1320 (f, f, fa, False, pa.node()),
1321 b'both renamed from %s' % fa,
1321 b'both renamed from %s' % fa,
1322 )
1322 )
1323 else:
1323 else:
1324 actions[f] = (
1324 actions[f] = (
1325 ACTION_MERGE,
1325 ACTION_MERGE,
1326 (f, f, None, False, pa.node()),
1326 (f, f, None, False, pa.node()),
1327 b'both created',
1327 b'both created',
1328 )
1328 )
1329 else:
1329 else:
1330 a = ma[f]
1330 a = ma[f]
1331 fla = ma.flags(f)
1331 fla = ma.flags(f)
1332 nol = b'l' not in fl1 + fl2 + fla
1332 nol = b'l' not in fl1 + fl2 + fla
1333 if n2 == a and fl2 == fla:
1333 if n2 == a and fl2 == fla:
1334 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1334 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1335 elif n1 == a and fl1 == fla: # local unchanged - use remote
1335 elif n1 == a and fl1 == fla: # local unchanged - use remote
1336 if n1 == n2: # optimization: keep local content
1336 if n1 == n2: # optimization: keep local content
1337 actions[f] = (
1337 actions[f] = (
1338 ACTION_EXEC,
1338 ACTION_EXEC,
1339 (fl2,),
1339 (fl2,),
1340 b'update permissions',
1340 b'update permissions',
1341 )
1341 )
1342 else:
1342 else:
1343 actions[f] = (
1343 actions[f] = (
1344 ACTION_GET,
1344 ACTION_GET,
1345 (fl2, False),
1345 (fl2, False),
1346 b'remote is newer',
1346 b'remote is newer',
1347 )
1347 )
1348 elif nol and n2 == a: # remote only changed 'x'
1348 elif nol and n2 == a: # remote only changed 'x'
1349 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1349 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1350 elif nol and n1 == a: # local only changed 'x'
1350 elif nol and n1 == a: # local only changed 'x'
1351 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1351 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1352 else: # both changed something
1352 else: # both changed something
1353 actions[f] = (
1353 actions[f] = (
1354 ACTION_MERGE,
1354 ACTION_MERGE,
1355 (f, f, f, False, pa.node()),
1355 (f, f, f, False, pa.node()),
1356 b'versions differ',
1356 b'versions differ',
1357 )
1357 )
1358 elif n1: # file exists only on local side
1358 elif n1: # file exists only on local side
1359 if f in copied:
1359 if f in copied:
1360 pass # we'll deal with it on m2 side
1360 pass # we'll deal with it on m2 side
1361 elif f in movewithdir: # directory rename, move local
1361 elif f in movewithdir: # directory rename, move local
1362 f2 = movewithdir[f]
1362 f2 = movewithdir[f]
1363 if f2 in m2:
1363 if f2 in m2:
1364 actions[f2] = (
1364 actions[f2] = (
1365 ACTION_MERGE,
1365 ACTION_MERGE,
1366 (f, f2, None, True, pa.node()),
1366 (f, f2, None, True, pa.node()),
1367 b'remote directory rename, both created',
1367 b'remote directory rename, both created',
1368 )
1368 )
1369 else:
1369 else:
1370 actions[f2] = (
1370 actions[f2] = (
1371 ACTION_DIR_RENAME_MOVE_LOCAL,
1371 ACTION_DIR_RENAME_MOVE_LOCAL,
1372 (f, fl1),
1372 (f, fl1),
1373 b'remote directory rename - move from %s' % f,
1373 b'remote directory rename - move from %s' % f,
1374 )
1374 )
1375 elif f in copy:
1375 elif f in copy:
1376 f2 = copy[f]
1376 f2 = copy[f]
1377 actions[f] = (
1377 actions[f] = (
1378 ACTION_MERGE,
1378 ACTION_MERGE,
1379 (f, f2, f2, False, pa.node()),
1379 (f, f2, f2, False, pa.node()),
1380 b'local copied/moved from %s' % f2,
1380 b'local copied/moved from %s' % f2,
1381 )
1381 )
1382 elif f in ma: # clean, a different, no remote
1382 elif f in ma: # clean, a different, no remote
1383 if n1 != ma[f]:
1383 if n1 != ma[f]:
1384 if acceptremote:
1384 if acceptremote:
1385 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1385 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1386 else:
1386 else:
1387 actions[f] = (
1387 actions[f] = (
1388 ACTION_CHANGED_DELETED,
1388 ACTION_CHANGED_DELETED,
1389 (f, None, f, False, pa.node()),
1389 (f, None, f, False, pa.node()),
1390 b'prompt changed/deleted',
1390 b'prompt changed/deleted',
1391 )
1391 )
1392 elif n1 == addednodeid:
1392 elif n1 == addednodeid:
1393 # This extra 'a' is added by working copy manifest to mark
1393 # This extra 'a' is added by working copy manifest to mark
1394 # the file as locally added. We should forget it instead of
1394 # the file as locally added. We should forget it instead of
1395 # deleting it.
1395 # deleting it.
1396 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1396 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1397 else:
1397 else:
1398 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1398 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1399 elif n2: # file exists only on remote side
1399 elif n2: # file exists only on remote side
1400 if f in copied:
1400 if f in copied:
1401 pass # we'll deal with it on m1 side
1401 pass # we'll deal with it on m1 side
1402 elif f in movewithdir:
1402 elif f in movewithdir:
1403 f2 = movewithdir[f]
1403 f2 = movewithdir[f]
1404 if f2 in m1:
1404 if f2 in m1:
1405 actions[f2] = (
1405 actions[f2] = (
1406 ACTION_MERGE,
1406 ACTION_MERGE,
1407 (f2, f, None, False, pa.node()),
1407 (f2, f, None, False, pa.node()),
1408 b'local directory rename, both created',
1408 b'local directory rename, both created',
1409 )
1409 )
1410 else:
1410 else:
1411 actions[f2] = (
1411 actions[f2] = (
1412 ACTION_LOCAL_DIR_RENAME_GET,
1412 ACTION_LOCAL_DIR_RENAME_GET,
1413 (f, fl2),
1413 (f, fl2),
1414 b'local directory rename - get from %s' % f,
1414 b'local directory rename - get from %s' % f,
1415 )
1415 )
1416 elif f in copy:
1416 elif f in copy:
1417 f2 = copy[f]
1417 f2 = copy[f]
1418 if f2 in m2:
1418 if f2 in m2:
1419 actions[f] = (
1419 actions[f] = (
1420 ACTION_MERGE,
1420 ACTION_MERGE,
1421 (f2, f, f2, False, pa.node()),
1421 (f2, f, f2, False, pa.node()),
1422 b'remote copied from %s' % f2,
1422 b'remote copied from %s' % f2,
1423 )
1423 )
1424 else:
1424 else:
1425 actions[f] = (
1425 actions[f] = (
1426 ACTION_MERGE,
1426 ACTION_MERGE,
1427 (f2, f, f2, True, pa.node()),
1427 (f2, f, f2, True, pa.node()),
1428 b'remote moved from %s' % f2,
1428 b'remote moved from %s' % f2,
1429 )
1429 )
1430 elif f not in ma:
1430 elif f not in ma:
1431 # local unknown, remote created: the logic is described by the
1431 # local unknown, remote created: the logic is described by the
1432 # following table:
1432 # following table:
1433 #
1433 #
1434 # force branchmerge different | action
1434 # force branchmerge different | action
1435 # n * * | create
1435 # n * * | create
1436 # y n * | create
1436 # y n * | create
1437 # y y n | create
1437 # y y n | create
1438 # y y y | merge
1438 # y y y | merge
1439 #
1439 #
1440 # Checking whether the files are different is expensive, so we
1440 # Checking whether the files are different is expensive, so we
1441 # don't do that when we can avoid it.
1441 # don't do that when we can avoid it.
1442 if not force:
1442 if not force:
1443 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1443 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1444 elif not branchmerge:
1444 elif not branchmerge:
1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1446 else:
1446 else:
1447 actions[f] = (
1447 actions[f] = (
1448 ACTION_CREATED_MERGE,
1448 ACTION_CREATED_MERGE,
1449 (fl2, pa.node()),
1449 (fl2, pa.node()),
1450 b'remote created, get or merge',
1450 b'remote created, get or merge',
1451 )
1451 )
1452 elif n2 != ma[f]:
1452 elif n2 != ma[f]:
1453 df = None
1453 df = None
1454 for d in dirmove:
1454 for d in dirmove:
1455 if f.startswith(d):
1455 if f.startswith(d):
1456 # new file added in a directory that was moved
1456 # new file added in a directory that was moved
1457 df = dirmove[d] + f[len(d) :]
1457 df = dirmove[d] + f[len(d) :]
1458 break
1458 break
1459 if df is not None and df in m1:
1459 if df is not None and df in m1:
1460 actions[df] = (
1460 actions[df] = (
1461 ACTION_MERGE,
1461 ACTION_MERGE,
1462 (df, f, f, False, pa.node()),
1462 (df, f, f, False, pa.node()),
1463 b'local directory rename - respect move '
1463 b'local directory rename - respect move '
1464 b'from %s' % f,
1464 b'from %s' % f,
1465 )
1465 )
1466 elif acceptremote:
1466 elif acceptremote:
1467 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1467 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1468 else:
1468 else:
1469 actions[f] = (
1469 actions[f] = (
1470 ACTION_DELETED_CHANGED,
1470 ACTION_DELETED_CHANGED,
1471 (None, f, f, False, pa.node()),
1471 (None, f, f, False, pa.node()),
1472 b'prompt deleted/changed',
1472 b'prompt deleted/changed',
1473 )
1473 )
1474
1474
1475 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1475 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1476 # If we are merging, look for path conflicts.
1476 # If we are merging, look for path conflicts.
1477 checkpathconflicts(repo, wctx, p2, actions)
1477 checkpathconflicts(repo, wctx, p2, actions)
1478
1478
1479 narrowmatch = repo.narrowmatch()
1479 narrowmatch = repo.narrowmatch()
1480 if not narrowmatch.always():
1480 if not narrowmatch.always():
1481 # Updates "actions" in place
1481 # Updates "actions" in place
1482 _filternarrowactions(narrowmatch, branchmerge, actions)
1482 _filternarrowactions(narrowmatch, branchmerge, actions)
1483
1483
1484 return actions, diverge, renamedelete
1484 return actions, diverge, renamedelete
1485
1485
1486
1486
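Ignoring flags, copies and directory renames, the core three-way decision made per file above can be condensed into a small sketch (None means the file is absent on that side); this is a simplification, not the full action table:

def classify_file(n1, n2, a):
    """Simplified per-file decision from local (n1), remote (n2) and
    ancestor (a) node ids; flags, copies and renames are ignored."""
    if n1 and n2:
        if a is None:
            return 'merge (both created)'
        if n2 == a:
            return 'keep (remote unchanged)'
        if n1 == a:
            return 'get (remote is newer)'
        return 'merge (versions differ)'
    if n1:
        if a is None:
            return 'keep (local only)'
        if n1 != a:
            return 'prompt changed/deleted'
        return 'remove (other deleted)'
    if n2:
        if a is None:
            return 'create (remote created)'
        if n2 != a:
            return 'prompt deleted/changed'
        return 'no action (already deleted locally)'
    return 'no action'

# classify_file('x', 'y', 'x') -> 'get (remote is newer)'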
1487 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1487 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1488 """Resolves false conflicts where the nodeid changed but the content
1488 """Resolves false conflicts where the nodeid changed but the content
1489 remained the same."""
1489 remained the same."""
1490 # We force a copy of actions.items() because we're going to mutate
1490 # We force a copy of actions.items() because we're going to mutate
1491 # actions as we resolve trivial conflicts.
1491 # actions as we resolve trivial conflicts.
1492 for f, (m, args, msg) in list(actions.items()):
1492 for f, (m, args, msg) in list(actions.items()):
1493 if (
1493 if (
1494 m == ACTION_CHANGED_DELETED
1494 m == ACTION_CHANGED_DELETED
1495 and f in ancestor
1495 and f in ancestor
1496 and not wctx[f].cmp(ancestor[f])
1496 and not wctx[f].cmp(ancestor[f])
1497 ):
1497 ):
1498 # local did change but ended up with same content
1498 # local did change but ended up with same content
1499 actions[f] = ACTION_REMOVE, None, b'prompt same'
1499 actions[f] = ACTION_REMOVE, None, b'prompt same'
1500 elif (
1500 elif (
1501 m == ACTION_DELETED_CHANGED
1501 m == ACTION_DELETED_CHANGED
1502 and f in ancestor
1502 and f in ancestor
1503 and not mctx[f].cmp(ancestor[f])
1503 and not mctx[f].cmp(ancestor[f])
1504 ):
1504 ):
1505 # remote did change but ended up with same content
1505 # remote did change but ended up with same content
1506 del actions[f] # don't get = keep local deleted
1506 del actions[f] # don't get = keep local deleted
1507
1507
1508
1508
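A minimal restatement of that downgrade rule, with predicates standing in for the filectx.cmp() calls against the ancestor:

def resolve_trivial(actions, local_same_as_anc, remote_same_as_anc):
    """Downgrade cd/dc prompts whose 'changed' side matches the ancestor."""
    for f, m in list(actions.items()):
        if m == 'cd' and local_same_as_anc(f):
            actions[f] = 'r'      # the local change was a no-op: just remove
        elif m == 'dc' and remote_same_as_anc(f):
            del actions[f]        # the remote change was a no-op: keep deletion

# a = {'x': 'cd', 'y': 'dc'}
# resolve_trivial(a, lambda f: True, lambda f: True)
# a -> {'x': 'r'}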
1509 def calculateupdates(
1509 def calculateupdates(
1510 repo,
1510 repo,
1511 wctx,
1511 wctx,
1512 mctx,
1512 mctx,
1513 ancestors,
1513 ancestors,
1514 branchmerge,
1514 branchmerge,
1515 force,
1515 force,
1516 acceptremote,
1516 acceptremote,
1517 followcopies,
1517 followcopies,
1518 matcher=None,
1518 matcher=None,
1519 mergeforce=False,
1519 mergeforce=False,
1520 ):
1520 ):
1521 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1521 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1522 # Avoid cycle.
1522 # Avoid cycle.
1523 from . import sparse
1523 from . import sparse
1524
1524
1525 if len(ancestors) == 1: # default
1525 if len(ancestors) == 1: # default
1526 actions, diverge, renamedelete = manifestmerge(
1526 actions, diverge, renamedelete = manifestmerge(
1527 repo,
1527 repo,
1528 wctx,
1528 wctx,
1529 mctx,
1529 mctx,
1530 ancestors[0],
1530 ancestors[0],
1531 branchmerge,
1531 branchmerge,
1532 force,
1532 force,
1533 matcher,
1533 matcher,
1534 acceptremote,
1534 acceptremote,
1535 followcopies,
1535 followcopies,
1536 )
1536 )
1537 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1537 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1538
1538
1539 else: # only when merge.preferancestor=* - the default
1539 else: # only when merge.preferancestor=* - the default
1540 repo.ui.note(
1540 repo.ui.note(
1541 _(b"note: merging %s and %s using bids from ancestors %s\n")
1541 _(b"note: merging %s and %s using bids from ancestors %s\n")
1542 % (
1542 % (
1543 wctx,
1543 wctx,
1544 mctx,
1544 mctx,
1545 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1545 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1546 )
1546 )
1547 )
1547 )
1548
1548
1549 # Call for bids
1549 # Call for bids
1550 fbids = (
1550 fbids = (
1551 {}
1551 {}
 1552 ) # mapping filename to bids (action method to list of actions)
 1552 ) # mapping filename to bids (action method to list of actions)
1553 diverge, renamedelete = None, None
1553 diverge, renamedelete = None, None
1554 for ancestor in ancestors:
1554 for ancestor in ancestors:
1555 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1555 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1556 actions, diverge1, renamedelete1 = manifestmerge(
1556 actions, diverge1, renamedelete1 = manifestmerge(
1557 repo,
1557 repo,
1558 wctx,
1558 wctx,
1559 mctx,
1559 mctx,
1560 ancestor,
1560 ancestor,
1561 branchmerge,
1561 branchmerge,
1562 force,
1562 force,
1563 matcher,
1563 matcher,
1564 acceptremote,
1564 acceptremote,
1565 followcopies,
1565 followcopies,
1566 forcefulldiff=True,
1566 forcefulldiff=True,
1567 )
1567 )
1568 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1568 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1569
1569
 1570 # Track the shortest set of warnings on the theory that bid
 1570 # Track the shortest set of warnings on the theory that bid
1571 # merge will correctly incorporate more information
1571 # merge will correctly incorporate more information
1572 if diverge is None or len(diverge1) < len(diverge):
1572 if diverge is None or len(diverge1) < len(diverge):
1573 diverge = diverge1
1573 diverge = diverge1
1574 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1574 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1575 renamedelete = renamedelete1
1575 renamedelete = renamedelete1
1576
1576
1577 for f, a in sorted(pycompat.iteritems(actions)):
1577 for f, a in sorted(pycompat.iteritems(actions)):
1578 m, args, msg = a
1578 m, args, msg = a
1579 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1579 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1580 if f in fbids:
1580 if f in fbids:
1581 d = fbids[f]
1581 d = fbids[f]
1582 if m in d:
1582 if m in d:
1583 d[m].append(a)
1583 d[m].append(a)
1584 else:
1584 else:
1585 d[m] = [a]
1585 d[m] = [a]
1586 else:
1586 else:
1587 fbids[f] = {m: [a]}
1587 fbids[f] = {m: [a]}
1588
1588
1589 # Pick the best bid for each file
1589 # Pick the best bid for each file
1590 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1590 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1591 actions = {}
1591 actions = {}
1592 for f, bids in sorted(fbids.items()):
1592 for f, bids in sorted(fbids.items()):
 1593 # bids is a mapping from action method to list of actions
 1593 # bids is a mapping from action method to list of actions
1594 # Consensus?
1594 # Consensus?
1595 if len(bids) == 1: # all bids are the same kind of method
1595 if len(bids) == 1: # all bids are the same kind of method
1596 m, l = list(bids.items())[0]
1596 m, l = list(bids.items())[0]
1597 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1597 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1598 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1598 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1599 actions[f] = l[0]
1599 actions[f] = l[0]
1600 continue
1600 continue
1601 # If keep is an option, just do it.
1601 # If keep is an option, just do it.
1602 if ACTION_KEEP in bids:
1602 if ACTION_KEEP in bids:
1603 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1603 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1604 actions[f] = bids[ACTION_KEEP][0]
1604 actions[f] = bids[ACTION_KEEP][0]
1605 continue
1605 continue
1606 # If there are gets and they all agree [how could they not?], do it.
1606 # If there are gets and they all agree [how could they not?], do it.
1607 if ACTION_GET in bids:
1607 if ACTION_GET in bids:
1608 ga0 = bids[ACTION_GET][0]
1608 ga0 = bids[ACTION_GET][0]
1609 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1609 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1610 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1610 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1611 actions[f] = ga0
1611 actions[f] = ga0
1612 continue
1612 continue
1613 # TODO: Consider other simple actions such as mode changes
1613 # TODO: Consider other simple actions such as mode changes
1614 # Handle inefficient democrazy.
1614 # Handle inefficient democrazy.
1615 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1615 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1616 for m, l in sorted(bids.items()):
1616 for m, l in sorted(bids.items()):
1617 for _f, args, msg in l:
1617 for _f, args, msg in l:
1618 repo.ui.note(b' %s -> %s\n' % (msg, m))
1618 repo.ui.note(b' %s -> %s\n' % (msg, m))
1619 # Pick random action. TODO: Instead, prompt user when resolving
1619 # Pick random action. TODO: Instead, prompt user when resolving
1620 m, l = list(bids.items())[0]
1620 m, l = list(bids.items())[0]
1621 repo.ui.warn(
1621 repo.ui.warn(
1622 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1622 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1623 )
1623 )
1624 actions[f] = l[0]
1624 actions[f] = l[0]
1625 continue
1625 continue
1626 repo.ui.note(_(b'end of auction\n\n'))
1626 repo.ui.note(_(b'end of auction\n\n'))
1627
1627
1628 if wctx.rev() is None:
1628 if wctx.rev() is None:
1629 fractions = _forgetremoved(wctx, mctx, branchmerge)
1629 fractions = _forgetremoved(wctx, mctx, branchmerge)
1630 actions.update(fractions)
1630 actions.update(fractions)
1631
1631
1632 prunedactions = sparse.filterupdatesactions(
1632 prunedactions = sparse.filterupdatesactions(
1633 repo, wctx, mctx, branchmerge, actions
1633 repo, wctx, mctx, branchmerge, actions
1634 )
1634 )
1635 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1635 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1636
1636
1637 return prunedactions, diverge, renamedelete
1637 return prunedactions, diverge, renamedelete
1638
1638
1639
1639
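The bid auction in the multi-ancestor branch can be paraphrased as a small standalone function; this sketches only the preference order (consensus, then keep, then an agreed get, then an arbitrary bid), not the exact tie-breaking or logging of the code above:

def pick_bids(fbids):
    """Resolve per-file bids gathered from several ancestors.

    fbids maps file -> {action: [bid, ...]} where each bid is an
    (action, args, msg)-like tuple.
    """
    actions = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:
            l = next(iter(bids.values()))
            if all(a == l[0] for a in l[1:]):
                actions[f] = l[0]          # consensus
                continue
        if 'k' in bids:
            actions[f] = bids['k'][0]      # keeping is always safe
        elif 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
            actions[f] = bids['g'][0]      # all ancestors agree on the get
        else:
            _m, l = sorted(bids.items())[0]
            actions[f] = l[0]              # ambiguous: pick one deterministically
    return actions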
1640 def _getcwd():
1640 def _getcwd():
1641 try:
1641 try:
1642 return encoding.getcwd()
1642 return encoding.getcwd()
1643 except OSError as err:
1643 except OSError as err:
1644 if err.errno == errno.ENOENT:
1644 if err.errno == errno.ENOENT:
1645 return None
1645 return None
1646 raise
1646 raise
1647
1647
1648
1648
1649 def batchremove(repo, wctx, actions):
1649 def batchremove(repo, wctx, actions):
1650 """apply removes to the working directory
1650 """apply removes to the working directory
1651
1651
1652 yields tuples for progress updates
1652 yields tuples for progress updates
1653 """
1653 """
1654 verbose = repo.ui.verbose
1654 verbose = repo.ui.verbose
1655 cwd = _getcwd()
1655 cwd = _getcwd()
1656 i = 0
1656 i = 0
1657 for f, args, msg in actions:
1657 for f, args, msg in actions:
1658 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1658 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1659 if verbose:
1659 if verbose:
1660 repo.ui.note(_(b"removing %s\n") % f)
1660 repo.ui.note(_(b"removing %s\n") % f)
1661 wctx[f].audit()
1661 wctx[f].audit()
1662 try:
1662 try:
1663 wctx[f].remove(ignoremissing=True)
1663 wctx[f].remove(ignoremissing=True)
1664 except OSError as inst:
1664 except OSError as inst:
1665 repo.ui.warn(
1665 repo.ui.warn(
1666 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1666 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1667 )
1667 )
1668 if i == 100:
1668 if i == 100:
1669 yield i, f
1669 yield i, f
1670 i = 0
1670 i = 0
1671 i += 1
1671 i += 1
1672 if i > 0:
1672 if i > 0:
1673 yield i, f
1673 yield i, f
1674
1674
1675 if cwd and not _getcwd():
1675 if cwd and not _getcwd():
1676 # cwd was removed in the course of removing files; print a helpful
1676 # cwd was removed in the course of removing files; print a helpful
1677 # warning.
1677 # warning.
1678 repo.ui.warn(
1678 repo.ui.warn(
1679 _(
1679 _(
1680 b"current directory was removed\n"
1680 b"current directory was removed\n"
1681 b"(consider changing to repo root: %s)\n"
1681 b"(consider changing to repo root: %s)\n"
1682 )
1682 )
1683 % repo.root
1683 % repo.root
1684 )
1684 )
1685
1685
1686
1686
1687 def batchget(repo, mctx, wctx, wantfiledata, actions):
1687 def batchget(repo, mctx, wctx, wantfiledata, actions):
1688 """apply gets to the working directory
1688 """apply gets to the working directory
1689
1689
1690 mctx is the context to get from
1690 mctx is the context to get from
1691
1691
1692 Yields arbitrarily many (False, tuple) for progress updates, followed by
1692 Yields arbitrarily many (False, tuple) for progress updates, followed by
1693 exactly one (True, filedata). When wantfiledata is false, filedata is an
1693 exactly one (True, filedata). When wantfiledata is false, filedata is an
1694 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1694 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1695 mtime) of the file f written for each action.
1695 mtime) of the file f written for each action.
1696 """
1696 """
1697 filedata = {}
1697 filedata = {}
1698 verbose = repo.ui.verbose
1698 verbose = repo.ui.verbose
1699 fctx = mctx.filectx
1699 fctx = mctx.filectx
1700 ui = repo.ui
1700 ui = repo.ui
1701 i = 0
1701 i = 0
1702 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1702 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1703 for f, (flags, backup), msg in actions:
1703 for f, (flags, backup), msg in actions:
1704 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1704 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1705 if verbose:
1705 if verbose:
1706 repo.ui.note(_(b"getting %s\n") % f)
1706 repo.ui.note(_(b"getting %s\n") % f)
1707
1707
1708 if backup:
1708 if backup:
1709 # If a file or directory exists with the same name, back that
1709 # If a file or directory exists with the same name, back that
1710 # up. Otherwise, look to see if there is a file that conflicts
1710 # up. Otherwise, look to see if there is a file that conflicts
1711 # with a directory this file is in, and if so, back that up.
1711 # with a directory this file is in, and if so, back that up.
1712 conflicting = f
1712 conflicting = f
1713 if not repo.wvfs.lexists(f):
1713 if not repo.wvfs.lexists(f):
1714 for p in pathutil.finddirs(f):
1714 for p in pathutil.finddirs(f):
1715 if repo.wvfs.isfileorlink(p):
1715 if repo.wvfs.isfileorlink(p):
1716 conflicting = p
1716 conflicting = p
1717 break
1717 break
1718 if repo.wvfs.lexists(conflicting):
1718 if repo.wvfs.lexists(conflicting):
1719 orig = scmutil.backuppath(ui, repo, conflicting)
1719 orig = scmutil.backuppath(ui, repo, conflicting)
1720 util.rename(repo.wjoin(conflicting), orig)
1720 util.rename(repo.wjoin(conflicting), orig)
1721 wfctx = wctx[f]
1721 wfctx = wctx[f]
1722 wfctx.clearunknown()
1722 wfctx.clearunknown()
1723 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1723 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1724 size = wfctx.write(
1724 size = wfctx.write(
1725 fctx(f).data(),
1725 fctx(f).data(),
1726 flags,
1726 flags,
1727 backgroundclose=True,
1727 backgroundclose=True,
1728 atomictemp=atomictemp,
1728 atomictemp=atomictemp,
1729 )
1729 )
1730 if wantfiledata:
1730 if wantfiledata:
1731 s = wfctx.lstat()
1731 s = wfctx.lstat()
1732 mode = s.st_mode
1732 mode = s.st_mode
1733 mtime = s[stat.ST_MTIME]
1733 mtime = s[stat.ST_MTIME]
1734 filedata[f] = (mode, size, mtime) # for dirstate.normal
1734 filedata[f] = (mode, size, mtime) # for dirstate.normal
1735 if i == 100:
1735 if i == 100:
1736 yield False, (i, f)
1736 yield False, (i, f)
1737 i = 0
1737 i = 0
1738 i += 1
1738 i += 1
1739 if i > 0:
1739 if i > 0:
1740 yield False, (i, f)
1740 yield False, (i, f)
1741 yield True, filedata
1741 yield True, filedata
1742
1742
1743
1743
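The yield protocol described in the docstring can be consumed like this; `gen` is any batchget-shaped generator and `progress` a hypothetical callback:

def drive_batchget(gen, progress=lambda count: None):
    """Report progress for each (False, ...) item and return the filedata
    carried by the single terminating (True, filedata) item."""
    filedata = {}
    for done, item in gen:
        if done:
            filedata = item
        else:
            count, _lastfile = item
            progress(count)
    return filedata

# def fake():
#     yield False, (100, 'a.txt')
#     yield False, (3, 'b.txt')
#     yield True, {'a.txt': (0o644, 12, 1234567890)}
# drive_batchget(fake()) -> {'a.txt': (0o644, 12, 1234567890)}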
1744 def _prefetchfiles(repo, ctx, actions):
1744 def _prefetchfiles(repo, ctx, actions):
1745 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1745 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1746 of merge actions. ``ctx`` is the context being merged in."""
1746 of merge actions. ``ctx`` is the context being merged in."""
1747
1747
1748 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1748 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1749 # don't touch the context to be merged in. 'cd' is skipped, because
1749 # don't touch the context to be merged in. 'cd' is skipped, because
1750 # changed/deleted never resolves to something from the remote side.
1750 # changed/deleted never resolves to something from the remote side.
1751 oplist = [
1751 oplist = [
1752 actions[a]
1752 actions[a]
1753 for a in (
1753 for a in (
1754 ACTION_GET,
1754 ACTION_GET,
1755 ACTION_DELETED_CHANGED,
1755 ACTION_DELETED_CHANGED,
1756 ACTION_LOCAL_DIR_RENAME_GET,
1756 ACTION_LOCAL_DIR_RENAME_GET,
1757 ACTION_MERGE,
1757 ACTION_MERGE,
1758 )
1758 )
1759 ]
1759 ]
1760 prefetch = scmutil.prefetchfiles
1760 prefetch = scmutil.prefetchfiles
1761 matchfiles = scmutil.matchfiles
1761 matchfiles = scmutil.matchfiles
1762 prefetch(
1762 prefetch(
1763 repo,
1763 repo,
1764 [ctx.rev()],
1764 [ctx.rev()],
1765 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1765 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1766 )
1766 )
1767
1767
1768
1768
1769 @attr.s(frozen=True)
1769 @attr.s(frozen=True)
1770 class updateresult(object):
1770 class updateresult(object):
1771 updatedcount = attr.ib()
1771 updatedcount = attr.ib()
1772 mergedcount = attr.ib()
1772 mergedcount = attr.ib()
1773 removedcount = attr.ib()
1773 removedcount = attr.ib()
1774 unresolvedcount = attr.ib()
1774 unresolvedcount = attr.ib()
1775
1775
1776 def isempty(self):
1776 def isempty(self):
1777 return not (
1777 return not (
1778 self.updatedcount
1778 self.updatedcount
1779 or self.mergedcount
1779 or self.mergedcount
1780 or self.removedcount
1780 or self.removedcount
1781 or self.unresolvedcount
1781 or self.unresolvedcount
1782 )
1782 )
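# [Editorial sketch, not part of this changeset] ``updateresult`` is the stats
# object that applyupdates()/update() hand back to callers; the values below
# are made-up placeholders, shown only to illustrate isempty():
#
#     r = updateresult(updatedcount=3, mergedcount=0, removedcount=1,
#                      unresolvedcount=0)
#     r.isempty()   # False -- True only when all four counters are zero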
1783
1783
1784
1784
1785 def emptyactions():
1785 def emptyactions():
1786 """create an actions dict, to be populated and passed to applyupdates()"""
1786 """create an actions dict, to be populated and passed to applyupdates()"""
1787 return dict(
1787 return dict(
1788 (m, [])
1788 (m, [])
1789 for m in (
1789 for m in (
1790 ACTION_ADD,
1790 ACTION_ADD,
1791 ACTION_ADD_MODIFIED,
1791 ACTION_ADD_MODIFIED,
1792 ACTION_FORGET,
1792 ACTION_FORGET,
1793 ACTION_GET,
1793 ACTION_GET,
1794 ACTION_CHANGED_DELETED,
1794 ACTION_CHANGED_DELETED,
1795 ACTION_DELETED_CHANGED,
1795 ACTION_DELETED_CHANGED,
1796 ACTION_REMOVE,
1796 ACTION_REMOVE,
1797 ACTION_DIR_RENAME_MOVE_LOCAL,
1797 ACTION_DIR_RENAME_MOVE_LOCAL,
1798 ACTION_LOCAL_DIR_RENAME_GET,
1798 ACTION_LOCAL_DIR_RENAME_GET,
1799 ACTION_MERGE,
1799 ACTION_MERGE,
1800 ACTION_EXEC,
1800 ACTION_EXEC,
1801 ACTION_KEEP,
1801 ACTION_KEEP,
1802 ACTION_PATH_CONFLICT,
1802 ACTION_PATH_CONFLICT,
1803 ACTION_PATH_CONFLICT_RESOLVE,
1803 ACTION_PATH_CONFLICT_RESOLVE,
1804 )
1804 )
1805 )
1805 )
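# [Editorial sketch, not part of this changeset] emptyactions() produces the
# dict-of-lists shape that applyupdates() consumes; a caller appends
# (filename, args, message) tuples to the relevant action list. The file name
# and message here are invented placeholders:
#
#     actions = emptyactions()
#     actions[ACTION_GET].append((b'a.txt', (b'', False), b'illustration'))
#     stats, filedata = applyupdates(
#         repo, actions, wctx, mctx, overwrite=False, wantfiledata=True
#     )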
1806
1806
1807
1807
1808 def applyupdates(
1808 def applyupdates(
1809 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1809 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1810 ):
1810 ):
1811 """apply the merge action list to the working directory
1811 """apply the merge action list to the working directory
1812
1812
1813 wctx is the working copy context
1813 wctx is the working copy context
1814 mctx is the context to be merged into the working copy
1814 mctx is the context to be merged into the working copy
1815
1815
1816 Return a tuple of (counts, filedata), where counts is a tuple
1816 Return a tuple of (counts, filedata), where counts is a tuple
1817 (updated, merged, removed, unresolved) that describes how many
1817 (updated, merged, removed, unresolved) that describes how many
1818 files were affected by the update, and filedata is as described in
1818 files were affected by the update, and filedata is as described in
1819 batchget.
1819 batchget.
1820 """
1820 """
1821
1821
1822 _prefetchfiles(repo, mctx, actions)
1822 _prefetchfiles(repo, mctx, actions)
1823
1823
1824 updated, merged, removed = 0, 0, 0
1824 updated, merged, removed = 0, 0, 0
1825 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1825 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1826 moves = []
1826 moves = []
1827 for m, l in actions.items():
1827 for m, l in actions.items():
1828 l.sort()
1828 l.sort()
1829
1829
1830 # 'cd' and 'dc' actions are treated like other merge conflicts
1830 # 'cd' and 'dc' actions are treated like other merge conflicts
1831 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1831 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1832 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1832 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1833 mergeactions.extend(actions[ACTION_MERGE])
1833 mergeactions.extend(actions[ACTION_MERGE])
1834 for f, args, msg in mergeactions:
1834 for f, args, msg in mergeactions:
1835 f1, f2, fa, move, anc = args
1835 f1, f2, fa, move, anc = args
1836 if f == b'.hgsubstate': # merged internally
1836 if f == b'.hgsubstate': # merged internally
1837 continue
1837 continue
1838 if f1 is None:
1838 if f1 is None:
1839 fcl = filemerge.absentfilectx(wctx, fa)
1839 fcl = filemerge.absentfilectx(wctx, fa)
1840 else:
1840 else:
1841 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1841 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1842 fcl = wctx[f1]
1842 fcl = wctx[f1]
1843 if f2 is None:
1843 if f2 is None:
1844 fco = filemerge.absentfilectx(mctx, fa)
1844 fco = filemerge.absentfilectx(mctx, fa)
1845 else:
1845 else:
1846 fco = mctx[f2]
1846 fco = mctx[f2]
1847 actx = repo[anc]
1847 actx = repo[anc]
1848 if fa in actx:
1848 if fa in actx:
1849 fca = actx[fa]
1849 fca = actx[fa]
1850 else:
1850 else:
1851 # TODO: move to absentfilectx
1851 # TODO: move to absentfilectx
1852 fca = repo.filectx(f1, fileid=nullrev)
1852 fca = repo.filectx(f1, fileid=nullrev)
1853 ms.add(fcl, fco, fca, f)
1853 ms.add(fcl, fco, fca, f)
1854 if f1 != f and move:
1854 if f1 != f and move:
1855 moves.append(f1)
1855 moves.append(f1)
1856
1856
1857 # remove renamed files after safely stored
1857 # remove renamed files after safely stored
1858 for f in moves:
1858 for f in moves:
1859 if wctx[f].lexists():
1859 if wctx[f].lexists():
1860 repo.ui.debug(b"removing %s\n" % f)
1860 repo.ui.debug(b"removing %s\n" % f)
1861 wctx[f].audit()
1861 wctx[f].audit()
1862 wctx[f].remove()
1862 wctx[f].remove()
1863
1863
1864 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1864 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1865 progress = repo.ui.makeprogress(
1865 progress = repo.ui.makeprogress(
1866 _(b'updating'), unit=_(b'files'), total=numupdates
1866 _(b'updating'), unit=_(b'files'), total=numupdates
1867 )
1867 )
1868
1868
1869 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1869 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1870 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1870 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1871
1871
1872 # record path conflicts
1872 # record path conflicts
1873 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1873 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1874 f1, fo = args
1874 f1, fo = args
1875 s = repo.ui.status
1875 s = repo.ui.status
1876 s(
1876 s(
1877 _(
1877 _(
1878 b"%s: path conflict - a file or link has the same name as a "
1878 b"%s: path conflict - a file or link has the same name as a "
1879 b"directory\n"
1879 b"directory\n"
1880 )
1880 )
1881 % f
1881 % f
1882 )
1882 )
1883 if fo == b'l':
1883 if fo == b'l':
1884 s(_(b"the local file has been renamed to %s\n") % f1)
1884 s(_(b"the local file has been renamed to %s\n") % f1)
1885 else:
1885 else:
1886 s(_(b"the remote file has been renamed to %s\n") % f1)
1886 s(_(b"the remote file has been renamed to %s\n") % f1)
1887 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1887 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1888 ms.addpath(f, f1, fo)
1888 ms.addpath(f, f1, fo)
1889 progress.increment(item=f)
1889 progress.increment(item=f)
1890
1890
1891 # When merging in-memory, we can't support worker processes, so set the
1891 # When merging in-memory, we can't support worker processes, so set the
1892 # per-item cost at 0 in that case.
1892 # per-item cost at 0 in that case.
1893 cost = 0 if wctx.isinmemory() else 0.001
1893 cost = 0 if wctx.isinmemory() else 0.001
1894
1894
1895 # remove in parallel (must come before resolving path conflicts and getting)
1895 # remove in parallel (must come before resolving path conflicts and getting)
1896 prog = worker.worker(
1896 prog = worker.worker(
1897 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1897 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1898 )
1898 )
1899 for i, item in prog:
1899 for i, item in prog:
1900 progress.increment(step=i, item=item)
1900 progress.increment(step=i, item=item)
1901 removed = len(actions[ACTION_REMOVE])
1901 removed = len(actions[ACTION_REMOVE])
1902
1902
1903 # resolve path conflicts (must come before getting)
1903 # resolve path conflicts (must come before getting)
1904 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1904 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1905 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1905 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1906 (f0,) = args
1906 (f0,) = args
1907 if wctx[f0].lexists():
1907 if wctx[f0].lexists():
1908 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1908 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1909 wctx[f].audit()
1909 wctx[f].audit()
1910 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1910 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1911 wctx[f0].remove()
1911 wctx[f0].remove()
1912 progress.increment(item=f)
1912 progress.increment(item=f)
1913
1913
1914 # get in parallel.
1914 # get in parallel.
1915 threadsafe = repo.ui.configbool(
1915 threadsafe = repo.ui.configbool(
1916 b'experimental', b'worker.wdir-get-thread-safe'
1916 b'experimental', b'worker.wdir-get-thread-safe'
1917 )
1917 )
1918 prog = worker.worker(
1918 prog = worker.worker(
1919 repo.ui,
1919 repo.ui,
1920 cost,
1920 cost,
1921 batchget,
1921 batchget,
1922 (repo, mctx, wctx, wantfiledata),
1922 (repo, mctx, wctx, wantfiledata),
1923 actions[ACTION_GET],
1923 actions[ACTION_GET],
1924 threadsafe=threadsafe,
1924 threadsafe=threadsafe,
1925 hasretval=True,
1925 hasretval=True,
1926 )
1926 )
1927 getfiledata = {}
1927 getfiledata = {}
1928 for final, res in prog:
1928 for final, res in prog:
1929 if final:
1929 if final:
1930 getfiledata = res
1930 getfiledata = res
1931 else:
1931 else:
1932 i, item = res
1932 i, item = res
1933 progress.increment(step=i, item=item)
1933 progress.increment(step=i, item=item)
1934 updated = len(actions[ACTION_GET])
1934 updated = len(actions[ACTION_GET])
1935
1935
1936 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1936 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1937 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1937 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1938
1938
1939 # forget (manifest only, just log it) (must come first)
1939 # forget (manifest only, just log it) (must come first)
1940 for f, args, msg in actions[ACTION_FORGET]:
1940 for f, args, msg in actions[ACTION_FORGET]:
1941 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1941 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1942 progress.increment(item=f)
1942 progress.increment(item=f)
1943
1943
1944 # re-add (manifest only, just log it)
1944 # re-add (manifest only, just log it)
1945 for f, args, msg in actions[ACTION_ADD]:
1945 for f, args, msg in actions[ACTION_ADD]:
1946 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1946 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1947 progress.increment(item=f)
1947 progress.increment(item=f)
1948
1948
1949 # re-add/mark as modified (manifest only, just log it)
1949 # re-add/mark as modified (manifest only, just log it)
1950 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1950 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1951 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1951 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1952 progress.increment(item=f)
1952 progress.increment(item=f)
1953
1953
1954 # keep (noop, just log it)
1954 # keep (noop, just log it)
1955 for f, args, msg in actions[ACTION_KEEP]:
1955 for f, args, msg in actions[ACTION_KEEP]:
1956 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1956 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1957 # no progress
1957 # no progress
1958
1958
1959 # directory rename, move local
1959 # directory rename, move local
1960 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1960 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1961 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1961 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1962 progress.increment(item=f)
1962 progress.increment(item=f)
1963 f0, flags = args
1963 f0, flags = args
1964 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1964 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1965 wctx[f].audit()
1965 wctx[f].audit()
1966 wctx[f].write(wctx.filectx(f0).data(), flags)
1966 wctx[f].write(wctx.filectx(f0).data(), flags)
1967 wctx[f0].remove()
1967 wctx[f0].remove()
1968 updated += 1
1968 updated += 1
1969
1969
1970 # local directory rename, get
1970 # local directory rename, get
1971 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1971 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1972 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1972 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1973 progress.increment(item=f)
1973 progress.increment(item=f)
1974 f0, flags = args
1974 f0, flags = args
1975 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1975 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1976 wctx[f].write(mctx.filectx(f0).data(), flags)
1976 wctx[f].write(mctx.filectx(f0).data(), flags)
1977 updated += 1
1977 updated += 1
1978
1978
1979 # exec
1979 # exec
1980 for f, args, msg in actions[ACTION_EXEC]:
1980 for f, args, msg in actions[ACTION_EXEC]:
1981 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1981 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1982 progress.increment(item=f)
1982 progress.increment(item=f)
1983 (flags,) = args
1983 (flags,) = args
1984 wctx[f].audit()
1984 wctx[f].audit()
1985 wctx[f].setflags(b'l' in flags, b'x' in flags)
1985 wctx[f].setflags(b'l' in flags, b'x' in flags)
1986 updated += 1
1986 updated += 1
1987
1987
1988 # the ordering is important here -- ms.mergedriver will raise if the merge
1988 # the ordering is important here -- ms.mergedriver will raise if the merge
1989 # driver has changed, and we want to be able to bypass it when overwrite is
1989 # driver has changed, and we want to be able to bypass it when overwrite is
1990 # True
1990 # True
1991 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1991 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1992
1992
1993 if usemergedriver:
1993 if usemergedriver:
1994 if wctx.isinmemory():
1994 if wctx.isinmemory():
1995 raise error.InMemoryMergeConflictsError(
1995 raise error.InMemoryMergeConflictsError(
1996 b"in-memory merge does not support mergedriver"
1996 b"in-memory merge does not support mergedriver"
1997 )
1997 )
1998 ms.commit()
1998 ms.commit()
1999 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1999 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2000 # the driver might leave some files unresolved
2000 # the driver might leave some files unresolved
2001 unresolvedf = set(ms.unresolved())
2001 unresolvedf = set(ms.unresolved())
2002 if not proceed:
2002 if not proceed:
2003 # XXX setting unresolved to at least 1 is a hack to make sure we
2003 # XXX setting unresolved to at least 1 is a hack to make sure we
2004 # error out
2004 # error out
2005 return updateresult(
2005 return updateresult(
2006 updated, merged, removed, max(len(unresolvedf), 1)
2006 updated, merged, removed, max(len(unresolvedf), 1)
2007 )
2007 )
2008 newactions = []
2008 newactions = []
2009 for f, args, msg in mergeactions:
2009 for f, args, msg in mergeactions:
2010 if f in unresolvedf:
2010 if f in unresolvedf:
2011 newactions.append((f, args, msg))
2011 newactions.append((f, args, msg))
2012 mergeactions = newactions
2012 mergeactions = newactions
2013
2013
2014 try:
2014 try:
2015 # premerge
2015 # premerge
2016 tocomplete = []
2016 tocomplete = []
2017 for f, args, msg in mergeactions:
2017 for f, args, msg in mergeactions:
2018 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2018 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2019 progress.increment(item=f)
2019 progress.increment(item=f)
2020 if f == b'.hgsubstate': # subrepo states need updating
2020 if f == b'.hgsubstate': # subrepo states need updating
2021 subrepoutil.submerge(
2021 subrepoutil.submerge(
2022 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2022 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2023 )
2023 )
2024 continue
2024 continue
2025 wctx[f].audit()
2025 wctx[f].audit()
2026 complete, r = ms.preresolve(f, wctx)
2026 complete, r = ms.preresolve(f, wctx)
2027 if not complete:
2027 if not complete:
2028 numupdates += 1
2028 numupdates += 1
2029 tocomplete.append((f, args, msg))
2029 tocomplete.append((f, args, msg))
2030
2030
2031 # merge
2031 # merge
2032 for f, args, msg in tocomplete:
2032 for f, args, msg in tocomplete:
2033 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2033 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2034 progress.increment(item=f, total=numupdates)
2034 progress.increment(item=f, total=numupdates)
2035 ms.resolve(f, wctx)
2035 ms.resolve(f, wctx)
2036
2036
2037 finally:
2037 finally:
2038 ms.commit()
2038 ms.commit()
2039
2039
2040 unresolved = ms.unresolvedcount()
2040 unresolved = ms.unresolvedcount()
2041
2041
2042 if (
2042 if (
2043 usemergedriver
2043 usemergedriver
2044 and not unresolved
2044 and not unresolved
2045 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2045 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2046 ):
2046 ):
2047 if not driverconclude(repo, ms, wctx, labels=labels):
2047 if not driverconclude(repo, ms, wctx, labels=labels):
2048 # XXX setting unresolved to at least 1 is a hack to make sure we
2048 # XXX setting unresolved to at least 1 is a hack to make sure we
2049 # error out
2049 # error out
2050 unresolved = max(unresolved, 1)
2050 unresolved = max(unresolved, 1)
2051
2051
2052 ms.commit()
2052 ms.commit()
2053
2053
2054 msupdated, msmerged, msremoved = ms.counts()
2054 msupdated, msmerged, msremoved = ms.counts()
2055 updated += msupdated
2055 updated += msupdated
2056 merged += msmerged
2056 merged += msmerged
2057 removed += msremoved
2057 removed += msremoved
2058
2058
2059 extraactions = ms.actions()
2059 extraactions = ms.actions()
2060 if extraactions:
2060 if extraactions:
2061 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2061 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2062 for k, acts in pycompat.iteritems(extraactions):
2062 for k, acts in pycompat.iteritems(extraactions):
2063 actions[k].extend(acts)
2063 actions[k].extend(acts)
2064 if k == ACTION_GET and wantfiledata:
2064 if k == ACTION_GET and wantfiledata:
2065 # no filedata until mergestate is updated to provide it
2065 # no filedata until mergestate is updated to provide it
2066 for a in acts:
2066 for a in acts:
2067 getfiledata[a[0]] = None
2067 getfiledata[a[0]] = None
2068 # Remove these files from actions[ACTION_MERGE] as well. This is
2068 # Remove these files from actions[ACTION_MERGE] as well. This is
2069 # important because in recordupdates, files in actions[ACTION_MERGE]
2069 # important because in recordupdates, files in actions[ACTION_MERGE]
2070 # are processed after files in other actions, and the merge driver
2070 # are processed after files in other actions, and the merge driver
2071 # might add files to those actions via extraactions above. This can
2071 # might add files to those actions via extraactions above. This can
2072 # lead to a file being recorded twice, with poor results. This is
2072 # lead to a file being recorded twice, with poor results. This is
2073 # especially problematic for actions[ACTION_REMOVE] (currently only
2073 # especially problematic for actions[ACTION_REMOVE] (currently only
2074 # possible with the merge driver in the initial merge process;
2074 # possible with the merge driver in the initial merge process;
2075 # interrupted merges don't go through this flow).
2075 # interrupted merges don't go through this flow).
2076 #
2076 #
2077 # The real fix here is to have indexes by both file and action so
2077 # The real fix here is to have indexes by both file and action so
2078 # that when the action for a file is changed it is automatically
2078 # that when the action for a file is changed it is automatically
2079 # reflected in the other action lists. But that involves a more
2079 # reflected in the other action lists. But that involves a more
2080 # complex data structure, so this will do for now.
2080 # complex data structure, so this will do for now.
2081 #
2081 #
2082 # We don't need to do the same operation for 'dc' and 'cd' because
2082 # We don't need to do the same operation for 'dc' and 'cd' because
2083 # those lists aren't consulted again.
2083 # those lists aren't consulted again.
2084 mfiles.difference_update(a[0] for a in acts)
2084 mfiles.difference_update(a[0] for a in acts)
2085
2085
2086 actions[ACTION_MERGE] = [
2086 actions[ACTION_MERGE] = [
2087 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2087 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2088 ]
2088 ]
2089
2089
2090 progress.complete()
2090 progress.complete()
2091 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2091 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2092 return updateresult(updated, merged, removed, unresolved), getfiledata
2092 return updateresult(updated, merged, removed, unresolved), getfiledata
2093
2093
2094
2094
2095 def recordupdates(repo, actions, branchmerge, getfiledata):
2095 def recordupdates(repo, actions, branchmerge, getfiledata):
2096 """record merge actions to the dirstate"""
2096 """record merge actions to the dirstate"""
2097 # remove (must come first)
2097 # remove (must come first)
2098 for f, args, msg in actions.get(ACTION_REMOVE, []):
2098 for f, args, msg in actions.get(ACTION_REMOVE, []):
2099 if branchmerge:
2099 if branchmerge:
2100 repo.dirstate.remove(f)
2100 repo.dirstate.remove(f)
2101 else:
2101 else:
2102 repo.dirstate.drop(f)
2102 repo.dirstate.drop(f)
2103
2103
2104 # forget (must come first)
2104 # forget (must come first)
2105 for f, args, msg in actions.get(ACTION_FORGET, []):
2105 for f, args, msg in actions.get(ACTION_FORGET, []):
2106 repo.dirstate.drop(f)
2106 repo.dirstate.drop(f)
2107
2107
2108 # resolve path conflicts
2108 # resolve path conflicts
2109 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
2109 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
2110 (f0,) = args
2110 (f0,) = args
2111 origf0 = repo.dirstate.copied(f0) or f0
2111 origf0 = repo.dirstate.copied(f0) or f0
2112 repo.dirstate.add(f)
2112 repo.dirstate.add(f)
2113 repo.dirstate.copy(origf0, f)
2113 repo.dirstate.copy(origf0, f)
2114 if f0 == origf0:
2114 if f0 == origf0:
2115 repo.dirstate.remove(f0)
2115 repo.dirstate.remove(f0)
2116 else:
2116 else:
2117 repo.dirstate.drop(f0)
2117 repo.dirstate.drop(f0)
2118
2118
2119 # re-add
2119 # re-add
2120 for f, args, msg in actions.get(ACTION_ADD, []):
2120 for f, args, msg in actions.get(ACTION_ADD, []):
2121 repo.dirstate.add(f)
2121 repo.dirstate.add(f)
2122
2122
2123 # re-add/mark as modified
2123 # re-add/mark as modified
2124 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
2124 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
2125 if branchmerge:
2125 if branchmerge:
2126 repo.dirstate.normallookup(f)
2126 repo.dirstate.normallookup(f)
2127 else:
2127 else:
2128 repo.dirstate.add(f)
2128 repo.dirstate.add(f)
2129
2129
2130 # exec change
2130 # exec change
2131 for f, args, msg in actions.get(ACTION_EXEC, []):
2131 for f, args, msg in actions.get(ACTION_EXEC, []):
2132 repo.dirstate.normallookup(f)
2132 repo.dirstate.normallookup(f)
2133
2133
2134 # keep
2134 # keep
2135 for f, args, msg in actions.get(ACTION_KEEP, []):
2135 for f, args, msg in actions.get(ACTION_KEEP, []):
2136 pass
2136 pass
2137
2137
2138 # get
2138 # get
2139 for f, args, msg in actions.get(ACTION_GET, []):
2139 for f, args, msg in actions.get(ACTION_GET, []):
2140 if branchmerge:
2140 if branchmerge:
2141 repo.dirstate.otherparent(f)
2141 repo.dirstate.otherparent(f)
2142 else:
2142 else:
2143 parentfiledata = getfiledata[f] if getfiledata else None
2143 parentfiledata = getfiledata[f] if getfiledata else None
2144 repo.dirstate.normal(f, parentfiledata=parentfiledata)
2144 repo.dirstate.normal(f, parentfiledata=parentfiledata)
2145
2145
2146 # merge
2146 # merge
2147 for f, args, msg in actions.get(ACTION_MERGE, []):
2147 for f, args, msg in actions.get(ACTION_MERGE, []):
2148 f1, f2, fa, move, anc = args
2148 f1, f2, fa, move, anc = args
2149 if branchmerge:
2149 if branchmerge:
2150 # We've done a branch merge, mark this file as merged
2150 # We've done a branch merge, mark this file as merged
2151 # so that we properly record the merger later
2151 # so that we properly record the merger later
2152 repo.dirstate.merge(f)
2152 repo.dirstate.merge(f)
2153 if f1 != f2: # copy/rename
2153 if f1 != f2: # copy/rename
2154 if move:
2154 if move:
2155 repo.dirstate.remove(f1)
2155 repo.dirstate.remove(f1)
2156 if f1 != f:
2156 if f1 != f:
2157 repo.dirstate.copy(f1, f)
2157 repo.dirstate.copy(f1, f)
2158 else:
2158 else:
2159 repo.dirstate.copy(f2, f)
2159 repo.dirstate.copy(f2, f)
2160 else:
2160 else:
2161 # We've update-merged a locally modified file, so
2161 # We've update-merged a locally modified file, so
2162 # we set the dirstate to emulate a normal checkout
2162 # we set the dirstate to emulate a normal checkout
2163 # of that file some time in the past. Thus our
2163 # of that file some time in the past. Thus our
2164 # merge will appear as a normal local file
2164 # merge will appear as a normal local file
2165 # modification.
2165 # modification.
2166 if f2 == f: # file not locally copied/moved
2166 if f2 == f: # file not locally copied/moved
2167 repo.dirstate.normallookup(f)
2167 repo.dirstate.normallookup(f)
2168 if move:
2168 if move:
2169 repo.dirstate.drop(f1)
2169 repo.dirstate.drop(f1)
2170
2170
2171 # directory rename, move local
2171 # directory rename, move local
2172 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
2172 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
2173 f0, flag = args
2173 f0, flag = args
2174 if branchmerge:
2174 if branchmerge:
2175 repo.dirstate.add(f)
2175 repo.dirstate.add(f)
2176 repo.dirstate.remove(f0)
2176 repo.dirstate.remove(f0)
2177 repo.dirstate.copy(f0, f)
2177 repo.dirstate.copy(f0, f)
2178 else:
2178 else:
2179 repo.dirstate.normal(f)
2179 repo.dirstate.normal(f)
2180 repo.dirstate.drop(f0)
2180 repo.dirstate.drop(f0)
2181
2181
2182 # directory rename, get
2182 # directory rename, get
2183 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
2183 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
2184 f0, flag = args
2184 f0, flag = args
2185 if branchmerge:
2185 if branchmerge:
2186 repo.dirstate.add(f)
2186 repo.dirstate.add(f)
2187 repo.dirstate.copy(f0, f)
2187 repo.dirstate.copy(f0, f)
2188 else:
2188 else:
2189 repo.dirstate.normal(f)
2189 repo.dirstate.normal(f)
2190
2190
2191
2191
2192 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2192 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2193 UPDATECHECK_NONE = b'none'
2193 UPDATECHECK_NONE = b'none'
2194 UPDATECHECK_LINEAR = b'linear'
2194 UPDATECHECK_LINEAR = b'linear'
2195 UPDATECHECK_NO_CONFLICT = b'noconflict'
2195 UPDATECHECK_NO_CONFLICT = b'noconflict'
2196
2196
2197
2197
2198 def update(
2198 def update(
2199 repo,
2199 repo,
2200 node,
2200 node,
2201 branchmerge,
2201 branchmerge,
2202 force,
2202 force,
2203 ancestor=None,
2203 ancestor=None,
2204 mergeancestor=False,
2204 mergeancestor=False,
2205 labels=None,
2205 labels=None,
2206 matcher=None,
2206 matcher=None,
2207 mergeforce=False,
2207 mergeforce=False,
2208 updatecheck=None,
2208 updatecheck=None,
2209 wc=None,
2209 wc=None,
2210 ):
2210 ):
2211 """
2211 """
2212 Perform a merge between the working directory and the given node
2212 Perform a merge between the working directory and the given node
2213
2213
2214 node = the node to update to
2214 node = the node to update to
2215 branchmerge = whether to merge between branches
2215 branchmerge = whether to merge between branches
2216 force = whether to force branch merging or file overwriting
2216 force = whether to force branch merging or file overwriting
2217 matcher = a matcher to filter file lists (dirstate not updated)
2217 matcher = a matcher to filter file lists (dirstate not updated)
2218 mergeancestor = whether it is merging with an ancestor. If true,
2218 mergeancestor = whether it is merging with an ancestor. If true,
2219 we should accept the incoming changes for any prompts that occur.
2219 we should accept the incoming changes for any prompts that occur.
2220 If false, merging with an ancestor (fast-forward) is only allowed
2220 If false, merging with an ancestor (fast-forward) is only allowed
2221 between different named branches. This flag is used by the rebase extension
2221 between different named branches. This flag is used by the rebase extension
2222 as a temporary fix and should be avoided in general.
2222 as a temporary fix and should be avoided in general.
2223 labels = labels to use for base, local and other
2223 labels = labels to use for base, local and other
2224 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2224 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2225 this is True, then 'force' should be True as well.
2225 this is True, then 'force' should be True as well.
2226
2226
2227 The table below shows all the behaviors of the update command given the
2227 The table below shows all the behaviors of the update command given the
2228 -c/--check and -C/--clean or no options, whether the working directory is
2228 -c/--check and -C/--clean or no options, whether the working directory is
2229 dirty, whether a revision is specified, and the relationship of the parent
2229 dirty, whether a revision is specified, and the relationship of the parent
2230 rev to the target rev (linear or not). Match from top first. The -n
2230 rev to the target rev (linear or not). Match from top first. The -n
2231 option doesn't exist on the command line, but represents the
2231 option doesn't exist on the command line, but represents the
2232 experimental.updatecheck=noconflict option.
2232 experimental.updatecheck=noconflict option.
2233
2233
2234 This logic is tested by test-update-branches.t.
2234 This logic is tested by test-update-branches.t.
2235
2235
2236 -c -C -n -m dirty rev linear | result
2236 -c -C -n -m dirty rev linear | result
2237 y y * * * * * | (1)
2237 y y * * * * * | (1)
2238 y * y * * * * | (1)
2238 y * y * * * * | (1)
2239 y * * y * * * | (1)
2239 y * * y * * * | (1)
2240 * y y * * * * | (1)
2240 * y y * * * * | (1)
2241 * y * y * * * | (1)
2241 * y * y * * * | (1)
2242 * * y y * * * | (1)
2242 * * y y * * * | (1)
2243 * * * * * n n | x
2243 * * * * * n n | x
2244 * * * * n * * | ok
2244 * * * * n * * | ok
2245 n n n n y * y | merge
2245 n n n n y * y | merge
2246 n n n n y y n | (2)
2246 n n n n y y n | (2)
2247 n n n y y * * | merge
2247 n n n y y * * | merge
2248 n n y n y * * | merge if no conflict
2248 n n y n y * * | merge if no conflict
2249 n y n n y * * | discard
2249 n y n n y * * | discard
2250 y n n n y * * | (3)
2250 y n n n y * * | (3)
2251
2251
2252 x = can't happen
2252 x = can't happen
2253 * = don't-care
2253 * = don't-care
2254 1 = incompatible options (checked in commands.py)
2254 1 = incompatible options (checked in commands.py)
2255 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2255 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2256 3 = abort: uncommitted changes (checked in commands.py)
2256 3 = abort: uncommitted changes (checked in commands.py)
2257
2257
2258 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2258 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2259 to repo[None] if None is passed.
2259 to repo[None] if None is passed.
2260
2260
2261 Return the same tuple as applyupdates().
2261 Return the same tuple as applyupdates().
2262 """
2262 """
2263 # Avoid cycle.
2263 # Avoid cycle.
2264 from . import sparse
2264 from . import sparse
2265
2265
2266 # This function used to find the default destination if node was None, but
2266 # This function used to find the default destination if node was None, but
2267 # that's now in destutil.py.
2267 # that's now in destutil.py.
2268 assert node is not None
2268 assert node is not None
2269 if not branchmerge and not force:
2269 if not branchmerge and not force:
2270 # TODO: remove the default once all callers that pass branchmerge=False
2270 # TODO: remove the default once all callers that pass branchmerge=False
2271 # and force=False pass a value for updatecheck. We may want to allow
2271 # and force=False pass a value for updatecheck. We may want to allow
2272 # updatecheck='abort' to better support some of these callers.
2272 # updatecheck='abort' to better support some of these callers.
2273 if updatecheck is None:
2273 if updatecheck is None:
2274 updatecheck = UPDATECHECK_LINEAR
2274 updatecheck = UPDATECHECK_LINEAR
2275 if updatecheck not in (
2275 if updatecheck not in (
2276 UPDATECHECK_NONE,
2276 UPDATECHECK_NONE,
2277 UPDATECHECK_LINEAR,
2277 UPDATECHECK_LINEAR,
2278 UPDATECHECK_NO_CONFLICT,
2278 UPDATECHECK_NO_CONFLICT,
2279 ):
2279 ):
2280 raise ValueError(
2280 raise ValueError(
2281 r'Invalid updatecheck %r (can accept %r)'
2281 r'Invalid updatecheck %r (can accept %r)'
2282 % (
2282 % (
2283 updatecheck,
2283 updatecheck,
2284 (
2284 (
2285 UPDATECHECK_NONE,
2285 UPDATECHECK_NONE,
2286 UPDATECHECK_LINEAR,
2286 UPDATECHECK_LINEAR,
2287 UPDATECHECK_NO_CONFLICT,
2287 UPDATECHECK_NO_CONFLICT,
2288 ),
2288 ),
2289 )
2289 )
2290 )
2290 )
2291 # If we're doing a partial update, we need to skip updating
2291 # If we're doing a partial update, we need to skip updating
2292 # the dirstate, so make a note of any partial-ness to the
2292 # the dirstate, so make a note of any partial-ness to the
2293 # update here.
2293 # update here.
2294 if matcher is None or matcher.always():
2294 if matcher is None or matcher.always():
2295 partial = False
2295 partial = False
2296 else:
2296 else:
2297 partial = True
2297 partial = True
2298 with repo.wlock():
2298 with repo.wlock():
2299 if wc is None:
2299 if wc is None:
2300 wc = repo[None]
2300 wc = repo[None]
2301 pl = wc.parents()
2301 pl = wc.parents()
2302 p1 = pl[0]
2302 p1 = pl[0]
2303 p2 = repo[node]
2303 p2 = repo[node]
2304 if ancestor is not None:
2304 if ancestor is not None:
2305 pas = [repo[ancestor]]
2305 pas = [repo[ancestor]]
2306 else:
2306 else:
2307 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2307 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2308 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2308 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2309 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2309 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2310 else:
2310 else:
2311 pas = [p1.ancestor(p2, warn=branchmerge)]
2311 pas = [p1.ancestor(p2, warn=branchmerge)]
2312
2312
2313 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2313 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2314
2314
2315 overwrite = force and not branchmerge
2315 overwrite = force and not branchmerge
2316 ### check phase
2316 ### check phase
2317 if not overwrite:
2317 if not overwrite:
2318 if len(pl) > 1:
2318 if len(pl) > 1:
2319 raise error.Abort(_(b"outstanding uncommitted merge"))
2319 raise error.Abort(_(b"outstanding uncommitted merge"))
2320 ms = mergestate.read(repo)
2320 ms = mergestate.read(repo)
2321 if list(ms.unresolved()):
2321 if list(ms.unresolved()):
2322 raise error.Abort(
2322 raise error.Abort(
2323 _(b"outstanding merge conflicts"),
2323 _(b"outstanding merge conflicts"),
2324 hint=_(b"use 'hg resolve' to resolve"),
2324 hint=_(b"use 'hg resolve' to resolve"),
2325 )
2325 )
2326 if branchmerge:
2326 if branchmerge:
2327 if pas == [p2]:
2327 if pas == [p2]:
2328 raise error.Abort(
2328 raise error.Abort(
2329 _(
2329 _(
2330 b"merging with a working directory ancestor"
2330 b"merging with a working directory ancestor"
2331 b" has no effect"
2331 b" has no effect"
2332 )
2332 )
2333 )
2333 )
2334 elif pas == [p1]:
2334 elif pas == [p1]:
2335 if not mergeancestor and wc.branch() == p2.branch():
2335 if not mergeancestor and wc.branch() == p2.branch():
2336 raise error.Abort(
2336 raise error.Abort(
2337 _(b"nothing to merge"),
2337 _(b"nothing to merge"),
2338 hint=_(b"use 'hg update' or check 'hg heads'"),
2338 hint=_(b"use 'hg update' or check 'hg heads'"),
2339 )
2339 )
2340 if not force and (wc.files() or wc.deleted()):
2340 if not force and (wc.files() or wc.deleted()):
2341 raise error.Abort(
2341 raise error.Abort(
2342 _(b"uncommitted changes"),
2342 _(b"uncommitted changes"),
2343 hint=_(b"use 'hg status' to list changes"),
2343 hint=_(b"use 'hg status' to list changes"),
2344 )
2344 )
2345 if not wc.isinmemory():
2345 if not wc.isinmemory():
2346 for s in sorted(wc.substate):
2346 for s in sorted(wc.substate):
2347 wc.sub(s).bailifchanged()
2347 wc.sub(s).bailifchanged()
2348
2348
2349 elif not overwrite:
2349 elif not overwrite:
2350 if p1 == p2: # no-op update
2350 if p1 == p2: # no-op update
2351 # call the hooks and exit early
2351 # call the hooks and exit early
2352 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2352 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2353 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2353 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2354 return updateresult(0, 0, 0, 0)
2354 return updateresult(0, 0, 0, 0)
2355
2355
2356 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2356 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2357 [p1],
2357 [p1],
2358 [p2],
2358 [p2],
2359 ): # nonlinear
2359 ): # nonlinear
2360 dirty = wc.dirty(missing=True)
2360 dirty = wc.dirty(missing=True)
2361 if dirty:
2361 if dirty:
2362 # Branching is a bit strange to ensure we do the minimal
2362 # Branching is a bit strange to ensure we do the minimal
2363 # number of calls to obsutil.foreground.
2363 # number of calls to obsutil.foreground.
2364 foreground = obsutil.foreground(repo, [p1.node()])
2364 foreground = obsutil.foreground(repo, [p1.node()])
2365 # note: the <node> variable contains a random identifier
2365 # note: the <node> variable contains a random identifier
2366 if repo[node].node() in foreground:
2366 if repo[node].node() in foreground:
2367 pass # allow updating to successors
2367 pass # allow updating to successors
2368 else:
2368 else:
2369 msg = _(b"uncommitted changes")
2369 msg = _(b"uncommitted changes")
2370 hint = _(b"commit or update --clean to discard changes")
2370 hint = _(b"commit or update --clean to discard changes")
2371 raise error.UpdateAbort(msg, hint=hint)
2371 raise error.UpdateAbort(msg, hint=hint)
2372 else:
2372 else:
2373 # Allow jumping branches if clean and specific rev given
2373 # Allow jumping branches if clean and specific rev given
2374 pass
2374 pass
2375
2375
2376 if overwrite:
2376 if overwrite:
2377 pas = [wc]
2377 pas = [wc]
2378 elif not branchmerge:
2378 elif not branchmerge:
2379 pas = [p1]
2379 pas = [p1]
2380
2380
2381 # deprecated config: merge.followcopies
2381 # deprecated config: merge.followcopies
2382 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2382 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2383 if overwrite:
2383 if overwrite:
2384 followcopies = False
2384 followcopies = False
2385 elif not pas[0]:
2385 elif not pas[0]:
2386 followcopies = False
2386 followcopies = False
2387 if not branchmerge and not wc.dirty(missing=True):
2387 if not branchmerge and not wc.dirty(missing=True):
2388 followcopies = False
2388 followcopies = False
2389
2389
2390 ### calculate phase
2390 ### calculate phase
2391 actionbyfile, diverge, renamedelete = calculateupdates(
2391 actionbyfile, diverge, renamedelete = calculateupdates(
2392 repo,
2392 repo,
2393 wc,
2393 wc,
2394 p2,
2394 p2,
2395 pas,
2395 pas,
2396 branchmerge,
2396 branchmerge,
2397 force,
2397 force,
2398 mergeancestor,
2398 mergeancestor,
2399 followcopies,
2399 followcopies,
2400 matcher=matcher,
2400 matcher=matcher,
2401 mergeforce=mergeforce,
2401 mergeforce=mergeforce,
2402 )
2402 )
2403
2403
2404 if updatecheck == UPDATECHECK_NO_CONFLICT:
2404 if updatecheck == UPDATECHECK_NO_CONFLICT:
2405 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2405 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2406 if m not in (
2406 if m not in (
2407 ACTION_GET,
2407 ACTION_GET,
2408 ACTION_KEEP,
2408 ACTION_KEEP,
2409 ACTION_EXEC,
2409 ACTION_EXEC,
2410 ACTION_REMOVE,
2410 ACTION_REMOVE,
2411 ACTION_PATH_CONFLICT_RESOLVE,
2411 ACTION_PATH_CONFLICT_RESOLVE,
2412 ):
2412 ):
2413 msg = _(b"conflicting changes")
2413 msg = _(b"conflicting changes")
2414 hint = _(b"commit or update --clean to discard changes")
2414 hint = _(b"commit or update --clean to discard changes")
2415 raise error.Abort(msg, hint=hint)
2415 raise error.Abort(msg, hint=hint)
2416
2416
2417 # Prompt and create actions. Most of this is in the resolve phase
2417 # Prompt and create actions. Most of this is in the resolve phase
2418 # already, but we can't handle .hgsubstate in filemerge or
2418 # already, but we can't handle .hgsubstate in filemerge or
2419 # subrepoutil.submerge yet so we have to keep prompting for it.
2419 # subrepoutil.submerge yet so we have to keep prompting for it.
2420 if b'.hgsubstate' in actionbyfile:
2420 if b'.hgsubstate' in actionbyfile:
2421 f = b'.hgsubstate'
2421 f = b'.hgsubstate'
2422 m, args, msg = actionbyfile[f]
2422 m, args, msg = actionbyfile[f]
2423 prompts = filemerge.partextras(labels)
2423 prompts = filemerge.partextras(labels)
2424 prompts[b'f'] = f
2424 prompts[b'f'] = f
2425 if m == ACTION_CHANGED_DELETED:
2425 if m == ACTION_CHANGED_DELETED:
2426 if repo.ui.promptchoice(
2426 if repo.ui.promptchoice(
2427 _(
2427 _(
2428 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2428 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2429 b"use (c)hanged version or (d)elete?"
2429 b"use (c)hanged version or (d)elete?"
2430 b"$$ &Changed $$ &Delete"
2430 b"$$ &Changed $$ &Delete"
2431 )
2431 )
2432 % prompts,
2432 % prompts,
2433 0,
2433 0,
2434 ):
2434 ):
2435 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2435 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2436 elif f in p1:
2436 elif f in p1:
2437 actionbyfile[f] = (
2437 actionbyfile[f] = (
2438 ACTION_ADD_MODIFIED,
2438 ACTION_ADD_MODIFIED,
2439 None,
2439 None,
2440 b'prompt keep',
2440 b'prompt keep',
2441 )
2441 )
2442 else:
2442 else:
2443 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2443 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2444 elif m == ACTION_DELETED_CHANGED:
2444 elif m == ACTION_DELETED_CHANGED:
2445 f1, f2, fa, move, anc = args
2445 f1, f2, fa, move, anc = args
2446 flags = p2[f2].flags()
2446 flags = p2[f2].flags()
2447 if (
2447 if (
2448 repo.ui.promptchoice(
2448 repo.ui.promptchoice(
2449 _(
2449 _(
2450 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2450 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2451 b"use (c)hanged version or leave (d)eleted?"
2451 b"use (c)hanged version or leave (d)eleted?"
2452 b"$$ &Changed $$ &Deleted"
2452 b"$$ &Changed $$ &Deleted"
2453 )
2453 )
2454 % prompts,
2454 % prompts,
2455 0,
2455 0,
2456 )
2456 )
2457 == 0
2457 == 0
2458 ):
2458 ):
2459 actionbyfile[f] = (
2459 actionbyfile[f] = (
2460 ACTION_GET,
2460 ACTION_GET,
2461 (flags, False),
2461 (flags, False),
2462 b'prompt recreating',
2462 b'prompt recreating',
2463 )
2463 )
2464 else:
2464 else:
2465 del actionbyfile[f]
2465 del actionbyfile[f]
2466
2466
2467 # Convert to dictionary-of-lists format
2467 # Convert to dictionary-of-lists format
2468 actions = emptyactions()
2468 actions = emptyactions()
2469 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2469 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2470 if m not in actions:
2470 if m not in actions:
2471 actions[m] = []
2471 actions[m] = []
2472 actions[m].append((f, args, msg))
2472 actions[m].append((f, args, msg))
2473
2473
2474 if not util.fscasesensitive(repo.path):
2474 if not util.fscasesensitive(repo.path):
2475 # check collision between files only in p2 for clean update
2475 # check collision between files only in p2 for clean update
2476 if not branchmerge and (
2476 if not branchmerge and (
2477 force or not wc.dirty(missing=True, branch=False)
2477 force or not wc.dirty(missing=True, branch=False)
2478 ):
2478 ):
2479 _checkcollision(repo, p2.manifest(), None)
2479 _checkcollision(repo, p2.manifest(), None)
2480 else:
2480 else:
2481 _checkcollision(repo, wc.manifest(), actions)
2481 _checkcollision(repo, wc.manifest(), actions)
2482
2482
2483 # divergent renames
2483 # divergent renames
2484 for f, fl in sorted(pycompat.iteritems(diverge)):
2484 for f, fl in sorted(pycompat.iteritems(diverge)):
2485 repo.ui.warn(
2485 repo.ui.warn(
2486 _(
2486 _(
2487 b"note: possible conflict - %s was renamed "
2487 b"note: possible conflict - %s was renamed "
2488 b"multiple times to:\n"
2488 b"multiple times to:\n"
2489 )
2489 )
2490 % f
2490 % f
2491 )
2491 )
2492 for nf in sorted(fl):
2492 for nf in sorted(fl):
2493 repo.ui.warn(b" %s\n" % nf)
2493 repo.ui.warn(b" %s\n" % nf)
2494
2494
2495 # rename and delete
2495 # rename and delete
2496 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2496 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2497 repo.ui.warn(
2497 repo.ui.warn(
2498 _(
2498 _(
2499 b"note: possible conflict - %s was deleted "
2499 b"note: possible conflict - %s was deleted "
2500 b"and renamed to:\n"
2500 b"and renamed to:\n"
2501 )
2501 )
2502 % f
2502 % f
2503 )
2503 )
2504 for nf in sorted(fl):
2504 for nf in sorted(fl):
2505 repo.ui.warn(b" %s\n" % nf)
2505 repo.ui.warn(b" %s\n" % nf)
2506
2506
2507 ### apply phase
2507 ### apply phase
2508 if not branchmerge: # just jump to the new rev
2508 if not branchmerge: # just jump to the new rev
2509 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2509 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2510 if not partial and not wc.isinmemory():
2510 if not partial and not wc.isinmemory():
2511 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2511 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2512 # note that we're in the middle of an update
2512 # note that we're in the middle of an update
2513 repo.vfs.write(b'updatestate', p2.hex())
2513 repo.vfs.write(b'updatestate', p2.hex())
2514
2514
2515 # Advertise fsmonitor when its presence could be useful.
2515 # Advertise fsmonitor when its presence could be useful.
2516 #
2516 #
2517 # We only advertise when performing an update from an empty working
2517 # We only advertise when performing an update from an empty working
2518 # directory. This typically only occurs during initial clone.
2518 # directory. This typically only occurs during initial clone.
2519 #
2519 #
2520 # We give users a mechanism to disable the warning in case it is
2520 # We give users a mechanism to disable the warning in case it is
2521 # annoying.
2521 # annoying.
2522 #
2522 #
2523 # We only allow this warning on Linux and MacOS because that's where fsmonitor is
2523 # We only allow this warning on Linux and MacOS because that's where fsmonitor is
2524 # considered stable.
2524 # considered stable.
2525 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2525 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2526 fsmonitorthreshold = repo.ui.configint(
2526 fsmonitorthreshold = repo.ui.configint(
2527 b'fsmonitor', b'warn_update_file_count'
2527 b'fsmonitor', b'warn_update_file_count'
2528 )
2528 )
2529 try:
2529 try:
2530 # avoid cycle: extensions -> cmdutil -> merge
2530 # avoid cycle: extensions -> cmdutil -> merge
2531 from . import extensions
2531 from . import extensions
2532
2532
2533 extensions.find(b'fsmonitor')
2533 extensions.find(b'fsmonitor')
2534 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2534 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2535 # We intentionally don't look at whether fsmonitor has disabled
2535 # We intentionally don't look at whether fsmonitor has disabled
2536 # itself because a) fsmonitor may have already printed a warning
2536 # itself because a) fsmonitor may have already printed a warning
2537 # b) we only care about the config state here.
2537 # b) we only care about the config state here.
2538 except KeyError:
2538 except KeyError:
2539 fsmonitorenabled = False
2539 fsmonitorenabled = False
2540
2540
2541 if (
2541 if (
2542 fsmonitorwarning
2542 fsmonitorwarning
2543 and not fsmonitorenabled
2543 and not fsmonitorenabled
2544 and p1.node() == nullid
2544 and p1.node() == nullid
2545 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2545 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2546 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2546 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2547 ):
2547 ):
2548 repo.ui.warn(
2548 repo.ui.warn(
2549 _(
2549 _(
2550 b'(warning: large working directory being used without '
2550 b'(warning: large working directory being used without '
2551 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2551 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2552 b'see "hg help -e fsmonitor")\n'
2552 b'see "hg help -e fsmonitor")\n'
2553 )
2553 )
2554 )
2554 )
2555
2555
2556 updatedirstate = not partial and not wc.isinmemory()
2556 updatedirstate = not partial and not wc.isinmemory()
2557 wantfiledata = updatedirstate and not branchmerge
2557 wantfiledata = updatedirstate and not branchmerge
2558 stats, getfiledata = applyupdates(
2558 stats, getfiledata = applyupdates(
2559 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2559 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2560 )
2560 )
2561
2561
2562 if updatedirstate:
2562 if updatedirstate:
2563 with repo.dirstate.parentchange():
2563 with repo.dirstate.parentchange():
2564 repo.setparents(fp1, fp2)
2564 repo.setparents(fp1, fp2)
2565 recordupdates(repo, actions, branchmerge, getfiledata)
2565 recordupdates(repo, actions, branchmerge, getfiledata)
2566 # update completed, clear state
2566 # update completed, clear state
2567 util.unlink(repo.vfs.join(b'updatestate'))
2567 util.unlink(repo.vfs.join(b'updatestate'))
2568
2568
2569 if not branchmerge:
2569 if not branchmerge:
2570 repo.dirstate.setbranch(p2.branch())
2570 repo.dirstate.setbranch(p2.branch())
2571
2571
2572 # If we're updating to a location, clean up any stale temporary includes
2572 # If we're updating to a location, clean up any stale temporary includes
2573 # (ex: this happens during hg rebase --abort).
2573 # (ex: this happens during hg rebase --abort).
2574 if not branchmerge:
2574 if not branchmerge:
2575 sparse.prunetemporaryincludes(repo)
2575 sparse.prunetemporaryincludes(repo)
2576
2576
2577 if not partial:
2577 if not partial:
2578 repo.hook(
2578 repo.hook(
2579 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2579 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2580 )
2580 )
2581 return stats
2581 return stats
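# [Editorial sketch, hypothetical caller not taken from this diff] a plain
# checkout-style invocation of update() might look like this; the revision
# used here is a placeholder:
#
#     stats = update(repo, repo[b'default'].node(), branchmerge=False,
#                    force=False, updatecheck=UPDATECHECK_NO_CONFLICT)
#     if stats.unresolvedcount:
#         raise error.Abort(_(b'unresolved merge conflicts'))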
2582
2582
2583
2583
2584 def graft(
2584 def graft(
2585 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2585 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2586 ):
2586 ):
2587 """Do a graft-like merge.
2587 """Do a graft-like merge.
2588
2588
2589 This is a merge where the merge ancestor is chosen such that one
2589 This is a merge where the merge ancestor is chosen such that one
2590 or more changesets are grafted onto the current changeset. In
2590 or more changesets are grafted onto the current changeset. In
2591 addition to the merge, this fixes up the dirstate to include only
2591 addition to the merge, this fixes up the dirstate to include only
2592 a single parent (if keepparent is False) and tries to duplicate any
2592 a single parent (if keepparent is False) and tries to duplicate any
2593 renames/copies appropriately.
2593 renames/copies appropriately.
2594
2594
2595 ctx - changeset to rebase
2595 ctx - changeset to rebase
2596 base - merge base, usually ctx.p1()
2596 base - merge base, usually ctx.p1()
2597 labels - merge labels eg ['local', 'graft']
2597 labels - merge labels eg ['local', 'graft']
2598 keepparent - keep second parent if any
2598 keepparent - keep second parent if any
2599 keepconflictparent - if unresolved, keep parent used for the merge
2599 keepconflictparent - if unresolved, keep parent used for the merge
2600
2600
2601 """
2601 """
2602 # If we're grafting a descendant onto an ancestor, be sure to pass
2602 # If we're grafting a descendant onto an ancestor, be sure to pass
2603 # mergeancestor=True to update. This does two things: 1) allows the merge if
2603 # mergeancestor=True to update. This does two things: 1) allows the merge if
2604 # the destination is the same as the parent of the ctx (so we can use graft
2604 # the destination is the same as the parent of the ctx (so we can use graft
2605 # to copy commits), and 2) informs update that the incoming changes are
2605 # to copy commits), and 2) informs update that the incoming changes are
2606 # newer than the destination so it doesn't prompt about "remote changed foo
2606 # newer than the destination so it doesn't prompt about "remote changed foo
2607 # which local deleted".
2607 # which local deleted".
2608 pctx = repo[b'.']
2608 pctx = repo[b'.']
2609 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2609 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2610
2610
2611 stats = update(
2611 stats = update(
2612 repo,
2612 repo,
2613 ctx.node(),
2613 ctx.node(),
2614 True,
2614 True,
2615 True,
2615 True,
2616 base.node(),
2616 base.node(),
2617 mergeancestor=mergeancestor,
2617 mergeancestor=mergeancestor,
2618 labels=labels,
2618 labels=labels,
2619 )
2619 )
2620
2620
2621 if keepconflictparent and stats.unresolvedcount:
2621 if keepconflictparent and stats.unresolvedcount:
2622 pother = ctx.node()
2622 pother = ctx.node()
2623 else:
2623 else:
2624 pother = nullid
2624 pother = nullid
2625 parents = ctx.parents()
2625 parents = ctx.parents()
2626 if keepparent and len(parents) == 2 and base in parents:
2626 if keepparent and len(parents) == 2 and base in parents:
2627 parents.remove(base)
2627 parents.remove(base)
2628 pother = parents[0].node()
2628 pother = parents[0].node()
2629 # Never set both parents equal to each other
2629 # Never set both parents equal to each other
2630 if pother == pctx.node():
2630 if pother == pctx.node():
2631 pother = nullid
2631 pother = nullid
2632
2632
2633 with repo.dirstate.parentchange():
2633 with repo.dirstate.parentchange():
2634 repo.setparents(pctx.node(), pother)
2634 repo.setparents(pctx.node(), pother)
2635 repo.dirstate.write(repo.currenttransaction())
2635 repo.dirstate.write(repo.currenttransaction())
2636 # fix up dirstate for copies and renames
2636 # fix up dirstate for copies and renames
2637 copies.duplicatecopies(repo, repo[None], ctx.rev(), base.rev())
2637 copies.duplicatecopies(repo, repo[None], ctx.rev(), base.rev())
2638 return stats
2638 return stats
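# [Editorial sketch, hypothetical usage] the graft command conceptually calls
# this helper once per grafted revision, using that revision's first parent
# as the merge base; ``rev`` below is a placeholder:
#
#     stats = graft(repo, repo[rev], repo[rev].p1(),
#                   labels=[b'local', b'graft'])
#     if stats.unresolvedcount:
#         ...stop and let the user run 'hg graft --continue'...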
2639
2639
2640
2640
2641 def purge(
2641 def purge(
2642 repo,
2642 repo,
2643 matcher,
2643 matcher,
2644 ignored=False,
2644 ignored=False,
2645 removeemptydirs=True,
2645 removeemptydirs=True,
2646 removefiles=True,
2646 removefiles=True,
2647 abortonerror=False,
2647 abortonerror=False,
2648 noop=False,
2648 noop=False,
2649 ):
2649 ):
2650 """Purge the working directory of untracked files.
2650 """Purge the working directory of untracked files.
2651
2651
2652 ``matcher`` is a matcher configured to scan the working directory -
2652 ``matcher`` is a matcher configured to scan the working directory -
2653 potentially a subset.
2653 potentially a subset.
2654
2654
2655 ``ignored`` controls whether ignored files should also be purged.
2655 ``ignored`` controls whether ignored files should also be purged.
2656
2656
2657 ``removeemptydirs`` controls whether empty directories should be removed.
2657 ``removeemptydirs`` controls whether empty directories should be removed.
2658
2658
2659 ``removefiles`` controls whether files are removed.
2659 ``removefiles`` controls whether files are removed.
2660
2660
2661 ``abortonerror`` causes an exception to be raised if an error occurs
2661 ``abortonerror`` causes an exception to be raised if an error occurs
2662 deleting a file or directory.
2662 deleting a file or directory.
2663
2663
2664 ``noop`` controls whether to actually remove files. If not defined, actions
2664 ``noop`` controls whether to actually remove files. If not defined, actions
2665 will be taken.
2665 will be taken.
2666
2666
2667 Returns an iterable of relative paths in the working directory that were
2667 Returns an iterable of relative paths in the working directory that were
2668 or would be removed.
2668 or would be removed.
2669 """
2669 """
2670
2670
2671 def remove(removefn, path):
2671 def remove(removefn, path):
2672 try:
2672 try:
2673 removefn(path)
2673 removefn(path)
2674 except OSError:
2674 except OSError:
2675 m = _(b'%s cannot be removed') % path
2675 m = _(b'%s cannot be removed') % path
2676 if abortonerror:
2676 if abortonerror:
2677 raise error.Abort(m)
2677 raise error.Abort(m)
2678 else:
2678 else:
2679 repo.ui.warn(_(b'warning: %s\n') % m)
2679 repo.ui.warn(_(b'warning: %s\n') % m)
2680
2680
2681 # There's no API to copy a matcher. So mutate the passed matcher and
2681 # There's no API to copy a matcher. So mutate the passed matcher and
2682 # restore it when we're done.
2682 # restore it when we're done.
2683 oldtraversedir = matcher.traversedir
2683 oldtraversedir = matcher.traversedir
2684
2684
2685 res = []
2685 res = []
2686
2686
2687 try:
2687 try:
2688 if removeemptydirs:
2688 if removeemptydirs:
2689 directories = []
2689 directories = []
2690 matcher.traversedir = directories.append
2690 matcher.traversedir = directories.append
2691
2691
2692 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2692 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2693
2693
2694 if removefiles:
2694 if removefiles:
2695 for f in sorted(status.unknown + status.ignored):
2695 for f in sorted(status.unknown + status.ignored):
2696 if not noop:
2696 if not noop:
2697 repo.ui.note(_(b'removing file %s\n') % f)
2697 repo.ui.note(_(b'removing file %s\n') % f)
2698 remove(repo.wvfs.unlink, f)
2698 remove(repo.wvfs.unlink, f)
2699 res.append(f)
2699 res.append(f)
2700
2700
2701 if removeemptydirs:
2701 if removeemptydirs:
2702 for f in sorted(directories, reverse=True):
2702 for f in sorted(directories, reverse=True):
2703 if matcher(f) and not repo.wvfs.listdir(f):
2703 if matcher(f) and not repo.wvfs.listdir(f):
2704 if not noop:
2704 if not noop:
2705 repo.ui.note(_(b'removing directory %s\n') % f)
2705 repo.ui.note(_(b'removing directory %s\n') % f)
2706 remove(repo.wvfs.rmdir, f)
2706 remove(repo.wvfs.rmdir, f)
2707 res.append(f)
2707 res.append(f)
2708
2708
2709 return res
2709 return res
2710
2710
2711 finally:
2711 finally:
2712 matcher.traversedir = oldtraversedir
2712 matcher.traversedir = oldtraversedir
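
Before the diff moves on to the next file, a short usage sketch of the purge() helper above: it assumes the caller already has a repo object and a matcher, and it assumes this module is importable as mercurial.merge, which is an inference from the surrounding hunk rather than something this changeset states.

# Illustrative sketch only: `purge` refers to the function shown above;
# the import path is an assumption about where this module lives.
from mercurial import merge as mergemod

def preview_purge(repo, matcher):
    # noop=True reports what would be removed without deleting anything
    for path in mergemod.purge(repo, matcher, ignored=False, noop=True):
        repo.ui.write(b'would remove %s\n' % path)
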
@@ -1,1144 +1,1146 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "predecessor" and possible
23 The old obsoleted changeset is called a "predecessor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a predecessor are called "successor markers of X" because they hold
25 a predecessor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successor are called "predecessor markers of Y" because they hold
27 a successor are called "predecessor markers of Y" because they hold
28 information about the predecessors of Y.
28 information about the predecessors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A into A' and
50 case. If two independent operations rewrite the same changeset A into A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depends on the version. See
66 The header is followed by the markers. Marker format depends on the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import hashlib
74 import struct
73 import struct
75
74
76 from .i18n import _
75 from .i18n import _
77 from .pycompat import getattr
76 from .pycompat import getattr
78 from . import (
77 from . import (
79 encoding,
78 encoding,
80 error,
79 error,
81 node,
80 node,
82 obsutil,
81 obsutil,
83 phases,
82 phases,
84 policy,
83 policy,
85 pycompat,
84 pycompat,
86 util,
85 util,
87 )
86 )
88 from .utils import dateutil
87 from .utils import (
88 dateutil,
89 hashutil,
90 )
89
91
90 parsers = policy.importmod('parsers')
92 parsers = policy.importmod('parsers')
91
93
92 _pack = struct.pack
94 _pack = struct.pack
93 _unpack = struct.unpack
95 _unpack = struct.unpack
94 _calcsize = struct.calcsize
96 _calcsize = struct.calcsize
95 propertycache = util.propertycache
97 propertycache = util.propertycache
96
98
97 # Options for obsolescence
99 # Options for obsolescence
98 createmarkersopt = b'createmarkers'
100 createmarkersopt = b'createmarkers'
99 allowunstableopt = b'allowunstable'
101 allowunstableopt = b'allowunstable'
100 exchangeopt = b'exchange'
102 exchangeopt = b'exchange'
101
103
102
104
103 def _getoptionvalue(repo, option):
105 def _getoptionvalue(repo, option):
104 """Returns True if the given repository has the given obsolete option
106 """Returns True if the given repository has the given obsolete option
105 enabled.
107 enabled.
106 """
108 """
107 configkey = b'evolution.%s' % option
109 configkey = b'evolution.%s' % option
108 newconfig = repo.ui.configbool(b'experimental', configkey)
110 newconfig = repo.ui.configbool(b'experimental', configkey)
109
111
110 # Return the value only if defined
112 # Return the value only if defined
111 if newconfig is not None:
113 if newconfig is not None:
112 return newconfig
114 return newconfig
113
115
114 # Fallback on generic option
116 # Fallback on generic option
115 try:
117 try:
116 return repo.ui.configbool(b'experimental', b'evolution')
118 return repo.ui.configbool(b'experimental', b'evolution')
117 except (error.ConfigError, AttributeError):
119 except (error.ConfigError, AttributeError):
118 # Fallback on old-fashion config
120 # Fallback on old-fashion config
119 # inconsistent config: experimental.evolution
121 # inconsistent config: experimental.evolution
120 result = set(repo.ui.configlist(b'experimental', b'evolution'))
122 result = set(repo.ui.configlist(b'experimental', b'evolution'))
121
123
122 if b'all' in result:
124 if b'all' in result:
123 return True
125 return True
124
126
125 # Temporary hack for next check
127 # Temporary hack for next check
126 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
128 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
127 if newconfig:
129 if newconfig:
128 result.add(b'createmarkers')
130 result.add(b'createmarkers')
129
131
130 return option in result
132 return option in result
131
133
132
134
133 def getoptions(repo):
135 def getoptions(repo):
134 """Returns dicts showing state of obsolescence features."""
136 """Returns dicts showing state of obsolescence features."""
135
137
136 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
138 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
137 unstablevalue = _getoptionvalue(repo, allowunstableopt)
139 unstablevalue = _getoptionvalue(repo, allowunstableopt)
138 exchangevalue = _getoptionvalue(repo, exchangeopt)
140 exchangevalue = _getoptionvalue(repo, exchangeopt)
139
141
140 # createmarkers must be enabled if other options are enabled
142 # createmarkers must be enabled if other options are enabled
141 if (unstablevalue or exchangevalue) and not createmarkersvalue:
143 if (unstablevalue or exchangevalue) and not createmarkersvalue:
142 raise error.Abort(
144 raise error.Abort(
143 _(
145 _(
144 b"'createmarkers' obsolete option must be enabled "
146 b"'createmarkers' obsolete option must be enabled "
145 b"if other obsolete options are enabled"
147 b"if other obsolete options are enabled"
146 )
148 )
147 )
149 )
148
150
149 return {
151 return {
150 createmarkersopt: createmarkersvalue,
152 createmarkersopt: createmarkersvalue,
151 allowunstableopt: unstablevalue,
153 allowunstableopt: unstablevalue,
152 exchangeopt: exchangevalue,
154 exchangeopt: exchangevalue,
153 }
155 }
154
156
155
157
156 def isenabled(repo, option):
158 def isenabled(repo, option):
157 """Returns True if the given repository has the given obsolete option
159 """Returns True if the given repository has the given obsolete option
158 enabled.
160 enabled.
159 """
161 """
160 return getoptions(repo)[option]
162 return getoptions(repo)[option]
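
A brief sketch of how these option helpers are consumed; the option constants come from this module, while the reporting function itself is purely illustrative.

def describe_evolution(repo):
    # Illustrative: print the effective state of each evolution sub-option.
    opts = getoptions(repo)
    for name in (createmarkersopt, allowunstableopt, exchangeopt):
        state = b'enabled' if opts[name] else b'disabled'
        repo.ui.status(b'evolution.%s is %s\n' % (name, state))
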
161
163
162
164
163 # Creating aliases for marker flags because evolve extension looks for
165 # Creating aliases for marker flags because evolve extension looks for
164 # bumpedfix in obsolete.py
166 # bumpedfix in obsolete.py
165 bumpedfix = obsutil.bumpedfix
167 bumpedfix = obsutil.bumpedfix
166 usingsha256 = obsutil.usingsha256
168 usingsha256 = obsutil.usingsha256
167
169
168 ## Parsing and writing of version "0"
170 ## Parsing and writing of version "0"
169 #
171 #
170 # The header is followed by the markers. Each marker is made of:
172 # The header is followed by the markers. Each marker is made of:
171 #
173 #
172 # - 1 uint8 : number of new changesets "N", can be zero.
174 # - 1 uint8 : number of new changesets "N", can be zero.
173 #
175 #
174 # - 1 uint32: metadata size "M" in bytes.
176 # - 1 uint32: metadata size "M" in bytes.
175 #
177 #
176 # - 1 byte: a bit field. It is reserved for flags used in common
178 # - 1 byte: a bit field. It is reserved for flags used in common
177 # obsolete marker operations, to avoid repeated decoding of metadata
179 # obsolete marker operations, to avoid repeated decoding of metadata
178 # entries.
180 # entries.
179 #
181 #
180 # - 20 bytes: obsoleted changeset identifier.
182 # - 20 bytes: obsoleted changeset identifier.
181 #
183 #
182 # - N*20 bytes: new changesets identifiers.
184 # - N*20 bytes: new changesets identifiers.
183 #
185 #
184 # - M bytes: metadata as a sequence of nul-terminated strings. Each
186 # - M bytes: metadata as a sequence of nul-terminated strings. Each
185 # string contains a key and a value, separated by a colon ':', without
187 # string contains a key and a value, separated by a colon ':', without
186 # additional encoding. Keys cannot contain '\0' or ':' and values
188 # additional encoding. Keys cannot contain '\0' or ':' and values
187 # cannot contain '\0'.
189 # cannot contain '\0'.
188 _fm0version = 0
190 _fm0version = 0
189 _fm0fixed = b'>BIB20s'
191 _fm0fixed = b'>BIB20s'
190 _fm0node = b'20s'
192 _fm0node = b'20s'
191 _fm0fsize = _calcsize(_fm0fixed)
193 _fm0fsize = _calcsize(_fm0fixed)
192 _fm0fnodesize = _calcsize(_fm0node)
194 _fm0fnodesize = _calcsize(_fm0node)
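
As a standalone illustration of the version-0 fixed layout documented above, the following sketch packs and re-reads one minimal record (a prune marker with no successors and no metadata); the node id is a fake 20-byte value used only for the example.

import struct

_fm0fixed_example = b'>BIB20s'      # numsuc, metadata size, flags, predecessor node
fakenode = b'\x01' * 20             # stand-in for a real 20-byte node id

record = struct.pack(_fm0fixed_example, 0, 0, 0, fakenode)
numsuc, mdsize, flags, pre = struct.unpack(_fm0fixed_example, record)
assert (numsuc, mdsize, flags, pre) == (0, 0, 0, fakenode)
assert len(record) == struct.calcsize(_fm0fixed_example)  # 26 bytes
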
193
195
194
196
195 def _fm0readmarkers(data, off, stop):
197 def _fm0readmarkers(data, off, stop):
196 # Loop on markers
198 # Loop on markers
197 while off < stop:
199 while off < stop:
198 # read fixed part
200 # read fixed part
199 cur = data[off : off + _fm0fsize]
201 cur = data[off : off + _fm0fsize]
200 off += _fm0fsize
202 off += _fm0fsize
201 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
203 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
202 # read replacement
204 # read replacement
203 sucs = ()
205 sucs = ()
204 if numsuc:
206 if numsuc:
205 s = _fm0fnodesize * numsuc
207 s = _fm0fnodesize * numsuc
206 cur = data[off : off + s]
208 cur = data[off : off + s]
207 sucs = _unpack(_fm0node * numsuc, cur)
209 sucs = _unpack(_fm0node * numsuc, cur)
208 off += s
210 off += s
209 # read metadata
211 # read metadata
210 # (metadata will be decoded on demand)
212 # (metadata will be decoded on demand)
211 metadata = data[off : off + mdsize]
213 metadata = data[off : off + mdsize]
212 if len(metadata) != mdsize:
214 if len(metadata) != mdsize:
213 raise error.Abort(
215 raise error.Abort(
214 _(
216 _(
215 b'parsing obsolete marker: metadata is too '
217 b'parsing obsolete marker: metadata is too '
216 b'short, %d bytes expected, got %d'
218 b'short, %d bytes expected, got %d'
217 )
219 )
218 % (mdsize, len(metadata))
220 % (mdsize, len(metadata))
219 )
221 )
220 off += mdsize
222 off += mdsize
221 metadata = _fm0decodemeta(metadata)
223 metadata = _fm0decodemeta(metadata)
222 try:
224 try:
223 when, offset = metadata.pop(b'date', b'0 0').split(b' ')
225 when, offset = metadata.pop(b'date', b'0 0').split(b' ')
224 date = float(when), int(offset)
226 date = float(when), int(offset)
225 except ValueError:
227 except ValueError:
226 date = (0.0, 0)
228 date = (0.0, 0)
227 parents = None
229 parents = None
228 if b'p2' in metadata:
230 if b'p2' in metadata:
229 parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
231 parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
230 elif b'p1' in metadata:
232 elif b'p1' in metadata:
231 parents = (metadata.pop(b'p1', None),)
233 parents = (metadata.pop(b'p1', None),)
232 elif b'p0' in metadata:
234 elif b'p0' in metadata:
233 parents = ()
235 parents = ()
234 if parents is not None:
236 if parents is not None:
235 try:
237 try:
236 parents = tuple(node.bin(p) for p in parents)
238 parents = tuple(node.bin(p) for p in parents)
237 # if parent content is not a nodeid, drop the data
239 # if parent content is not a nodeid, drop the data
238 for p in parents:
240 for p in parents:
239 if len(p) != 20:
241 if len(p) != 20:
240 parents = None
242 parents = None
241 break
243 break
242 except TypeError:
244 except TypeError:
243 # if content cannot be translated to nodeid drop the data.
245 # if content cannot be translated to nodeid drop the data.
244 parents = None
246 parents = None
245
247
246 metadata = tuple(sorted(pycompat.iteritems(metadata)))
248 metadata = tuple(sorted(pycompat.iteritems(metadata)))
247
249
248 yield (pre, sucs, flags, metadata, date, parents)
250 yield (pre, sucs, flags, metadata, date, parents)
249
251
250
252
251 def _fm0encodeonemarker(marker):
253 def _fm0encodeonemarker(marker):
252 pre, sucs, flags, metadata, date, parents = marker
254 pre, sucs, flags, metadata, date, parents = marker
253 if flags & usingsha256:
255 if flags & usingsha256:
254 raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
256 raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
255 metadata = dict(metadata)
257 metadata = dict(metadata)
256 time, tz = date
258 time, tz = date
257 metadata[b'date'] = b'%r %i' % (time, tz)
259 metadata[b'date'] = b'%r %i' % (time, tz)
258 if parents is not None:
260 if parents is not None:
259 if not parents:
261 if not parents:
260 # mark that we explicitly recorded no parents
262 # mark that we explicitly recorded no parents
261 metadata[b'p0'] = b''
263 metadata[b'p0'] = b''
262 for i, p in enumerate(parents, 1):
264 for i, p in enumerate(parents, 1):
263 metadata[b'p%i' % i] = node.hex(p)
265 metadata[b'p%i' % i] = node.hex(p)
264 metadata = _fm0encodemeta(metadata)
266 metadata = _fm0encodemeta(metadata)
265 numsuc = len(sucs)
267 numsuc = len(sucs)
266 format = _fm0fixed + (_fm0node * numsuc)
268 format = _fm0fixed + (_fm0node * numsuc)
267 data = [numsuc, len(metadata), flags, pre]
269 data = [numsuc, len(metadata), flags, pre]
268 data.extend(sucs)
270 data.extend(sucs)
269 return _pack(format, *data) + metadata
271 return _pack(format, *data) + metadata
270
272
271
273
272 def _fm0encodemeta(meta):
274 def _fm0encodemeta(meta):
273 """Return encoded metadata string to string mapping.
275 """Return encoded metadata string to string mapping.
274
276
275 Assume no ':' in key and no '\0' in both key and value."""
277 Assume no ':' in key and no '\0' in both key and value."""
276 for key, value in pycompat.iteritems(meta):
278 for key, value in pycompat.iteritems(meta):
277 if b':' in key or b'\0' in key:
279 if b':' in key or b'\0' in key:
278 raise ValueError(b"':' and '\0' are forbidden in metadata key'")
280 raise ValueError(b"':' and '\0' are forbidden in metadata key'")
279 if b'\0' in value:
281 if b'\0' in value:
280 raise ValueError(b"':' is forbidden in metadata value'")
282 raise ValueError(b"':' is forbidden in metadata value'")
281 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
283 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
282
284
283
285
284 def _fm0decodemeta(data):
286 def _fm0decodemeta(data):
285 """Return string to string dictionary from encoded version."""
287 """Return string to string dictionary from encoded version."""
286 d = {}
288 d = {}
287 for l in data.split(b'\0'):
289 for l in data.split(b'\0'):
288 if l:
290 if l:
289 key, value = l.split(b':', 1)
291 key, value = l.split(b':', 1)
290 d[key] = value
292 d[key] = value
291 return d
293 return d
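
The two helpers above define a tiny wire format: sorted key:value pairs joined by NUL bytes. The sketch below re-implements the same round trip inline so it runs without the rest of this module.

def encodemeta_example(meta):
    # same layout as _fm0encodemeta: 'key:value' entries separated by NUL
    return b'\0'.join(b'%s:%s' % (k, meta[k]) for k in sorted(meta))

def decodemeta_example(data):
    return dict(item.split(b':', 1) for item in data.split(b'\0') if item)

blob = encodemeta_example({b'user': b'alice', b'operation': b'amend'})
assert decodemeta_example(blob) == {b'user': b'alice', b'operation': b'amend'}
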
292
294
293
295
294 ## Parsing and writing of version "1"
296 ## Parsing and writing of version "1"
295 #
297 #
296 # The header is followed by the markers. Each marker is made of:
298 # The header is followed by the markers. Each marker is made of:
297 #
299 #
298 # - uint32: total size of the marker (including this field)
300 # - uint32: total size of the marker (including this field)
299 #
301 #
300 # - float64: date in seconds since epoch
302 # - float64: date in seconds since epoch
301 #
303 #
302 # - int16: timezone offset in minutes
304 # - int16: timezone offset in minutes
303 #
305 #
304 # - uint16: a bit field. It is reserved for flags used in common
306 # - uint16: a bit field. It is reserved for flags used in common
305 # obsolete marker operations, to avoid repeated decoding of metadata
307 # obsolete marker operations, to avoid repeated decoding of metadata
306 # entries.
308 # entries.
307 #
309 #
308 # - uint8: number of successors "N", can be zero.
310 # - uint8: number of successors "N", can be zero.
309 #
311 #
310 # - uint8: number of parents "P", can be zero.
312 # - uint8: number of parents "P", can be zero.
311 #
313 #
312 # 0: parents data stored but no parent,
314 # 0: parents data stored but no parent,
313 # 1: one parent stored,
315 # 1: one parent stored,
314 # 2: two parents stored,
316 # 2: two parents stored,
315 # 3: no parent data stored
317 # 3: no parent data stored
316 #
318 #
317 # - uint8: number of metadata entries M
319 # - uint8: number of metadata entries M
318 #
320 #
319 # - 20 or 32 bytes: predecessor changeset identifier.
321 # - 20 or 32 bytes: predecessor changeset identifier.
320 #
322 #
321 # - N*(20 or 32) bytes: successors changesets identifiers.
323 # - N*(20 or 32) bytes: successors changesets identifiers.
322 #
324 #
323 # - P*(20 or 32) bytes: parents of the predecessors changesets.
325 # - P*(20 or 32) bytes: parents of the predecessors changesets.
324 #
326 #
325 # - M*(uint8, uint8): size of all metadata entries (key and value)
327 # - M*(uint8, uint8): size of all metadata entries (key and value)
326 #
328 #
327 # - remaining bytes: the metadata, each (key, value) pair after the other.
329 # - remaining bytes: the metadata, each (key, value) pair after the other.
328 _fm1version = 1
330 _fm1version = 1
329 _fm1fixed = b'>IdhHBBB20s'
331 _fm1fixed = b'>IdhHBBB20s'
330 _fm1nodesha1 = b'20s'
332 _fm1nodesha1 = b'20s'
331 _fm1nodesha256 = b'32s'
333 _fm1nodesha256 = b'32s'
332 _fm1nodesha1size = _calcsize(_fm1nodesha1)
334 _fm1nodesha1size = _calcsize(_fm1nodesha1)
333 _fm1nodesha256size = _calcsize(_fm1nodesha256)
335 _fm1nodesha256size = _calcsize(_fm1nodesha256)
334 _fm1fsize = _calcsize(_fm1fixed)
336 _fm1fsize = _calcsize(_fm1fixed)
335 _fm1parentnone = 3
337 _fm1parentnone = 3
336 _fm1parentshift = 14
338 _fm1parentshift = 14
337 _fm1parentmask = _fm1parentnone << _fm1parentshift
339 _fm1parentmask = _fm1parentnone << _fm1parentshift
338 _fm1metapair = b'BB'
340 _fm1metapair = b'BB'
339 _fm1metapairsize = _calcsize(_fm1metapair)
341 _fm1metapairsize = _calcsize(_fm1metapair)
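
For the version-1 format the fixed header is a single struct mirroring the comment block above; this self-contained check simply confirms the byte budget implied by that format string.

import struct

# uint32 size + float64 date + int16 tz + uint16 flags
# + three uint8 counters + 20-byte predecessor = 39 bytes
assert struct.calcsize(b'>IdhHBBB20s') == 39
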
340
342
341
343
342 def _fm1purereadmarkers(data, off, stop):
344 def _fm1purereadmarkers(data, off, stop):
343 # make some global constants local for performance
345 # make some global constants local for performance
344 noneflag = _fm1parentnone
346 noneflag = _fm1parentnone
345 sha2flag = usingsha256
347 sha2flag = usingsha256
346 sha1size = _fm1nodesha1size
348 sha1size = _fm1nodesha1size
347 sha2size = _fm1nodesha256size
349 sha2size = _fm1nodesha256size
348 sha1fmt = _fm1nodesha1
350 sha1fmt = _fm1nodesha1
349 sha2fmt = _fm1nodesha256
351 sha2fmt = _fm1nodesha256
350 metasize = _fm1metapairsize
352 metasize = _fm1metapairsize
351 metafmt = _fm1metapair
353 metafmt = _fm1metapair
352 fsize = _fm1fsize
354 fsize = _fm1fsize
353 unpack = _unpack
355 unpack = _unpack
354
356
355 # Loop on markers
357 # Loop on markers
356 ufixed = struct.Struct(_fm1fixed).unpack
358 ufixed = struct.Struct(_fm1fixed).unpack
357
359
358 while off < stop:
360 while off < stop:
359 # read fixed part
361 # read fixed part
360 o1 = off + fsize
362 o1 = off + fsize
361 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
363 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
362
364
363 if flags & sha2flag:
365 if flags & sha2flag:
364 # FIXME: prec was read as a SHA1, needs to be amended
366 # FIXME: prec was read as a SHA1, needs to be amended
365
367
366 # read 0 or more successors
368 # read 0 or more successors
367 if numsuc == 1:
369 if numsuc == 1:
368 o2 = o1 + sha2size
370 o2 = o1 + sha2size
369 sucs = (data[o1:o2],)
371 sucs = (data[o1:o2],)
370 else:
372 else:
371 o2 = o1 + sha2size * numsuc
373 o2 = o1 + sha2size * numsuc
372 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
374 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
373
375
374 # read parents
376 # read parents
375 if numpar == noneflag:
377 if numpar == noneflag:
376 o3 = o2
378 o3 = o2
377 parents = None
379 parents = None
378 elif numpar == 1:
380 elif numpar == 1:
379 o3 = o2 + sha2size
381 o3 = o2 + sha2size
380 parents = (data[o2:o3],)
382 parents = (data[o2:o3],)
381 else:
383 else:
382 o3 = o2 + sha2size * numpar
384 o3 = o2 + sha2size * numpar
383 parents = unpack(sha2fmt * numpar, data[o2:o3])
385 parents = unpack(sha2fmt * numpar, data[o2:o3])
384 else:
386 else:
385 # read 0 or more successors
387 # read 0 or more successors
386 if numsuc == 1:
388 if numsuc == 1:
387 o2 = o1 + sha1size
389 o2 = o1 + sha1size
388 sucs = (data[o1:o2],)
390 sucs = (data[o1:o2],)
389 else:
391 else:
390 o2 = o1 + sha1size * numsuc
392 o2 = o1 + sha1size * numsuc
391 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
393 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
392
394
393 # read parents
395 # read parents
394 if numpar == noneflag:
396 if numpar == noneflag:
395 o3 = o2
397 o3 = o2
396 parents = None
398 parents = None
397 elif numpar == 1:
399 elif numpar == 1:
398 o3 = o2 + sha1size
400 o3 = o2 + sha1size
399 parents = (data[o2:o3],)
401 parents = (data[o2:o3],)
400 else:
402 else:
401 o3 = o2 + sha1size * numpar
403 o3 = o2 + sha1size * numpar
402 parents = unpack(sha1fmt * numpar, data[o2:o3])
404 parents = unpack(sha1fmt * numpar, data[o2:o3])
403
405
404 # read metadata
406 # read metadata
405 off = o3 + metasize * nummeta
407 off = o3 + metasize * nummeta
406 metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
408 metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
407 metadata = []
409 metadata = []
408 for idx in pycompat.xrange(0, len(metapairsize), 2):
410 for idx in pycompat.xrange(0, len(metapairsize), 2):
409 o1 = off + metapairsize[idx]
411 o1 = off + metapairsize[idx]
410 o2 = o1 + metapairsize[idx + 1]
412 o2 = o1 + metapairsize[idx + 1]
411 metadata.append((data[off:o1], data[o1:o2]))
413 metadata.append((data[off:o1], data[o1:o2]))
412 off = o2
414 off = o2
413
415
414 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
416 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
415
417
416
418
417 def _fm1encodeonemarker(marker):
419 def _fm1encodeonemarker(marker):
418 pre, sucs, flags, metadata, date, parents = marker
420 pre, sucs, flags, metadata, date, parents = marker
419 # determine node size
421 # determine node size
420 _fm1node = _fm1nodesha1
422 _fm1node = _fm1nodesha1
421 if flags & usingsha256:
423 if flags & usingsha256:
422 _fm1node = _fm1nodesha256
424 _fm1node = _fm1nodesha256
423 numsuc = len(sucs)
425 numsuc = len(sucs)
424 numextranodes = numsuc
426 numextranodes = numsuc
425 if parents is None:
427 if parents is None:
426 numpar = _fm1parentnone
428 numpar = _fm1parentnone
427 else:
429 else:
428 numpar = len(parents)
430 numpar = len(parents)
429 numextranodes += numpar
431 numextranodes += numpar
430 formatnodes = _fm1node * numextranodes
432 formatnodes = _fm1node * numextranodes
431 formatmeta = _fm1metapair * len(metadata)
433 formatmeta = _fm1metapair * len(metadata)
432 format = _fm1fixed + formatnodes + formatmeta
434 format = _fm1fixed + formatnodes + formatmeta
433 # tz is stored in minutes so we divide by 60
435 # tz is stored in minutes so we divide by 60
434 tz = date[1] // 60
436 tz = date[1] // 60
435 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
437 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
436 data.extend(sucs)
438 data.extend(sucs)
437 if parents is not None:
439 if parents is not None:
438 data.extend(parents)
440 data.extend(parents)
439 totalsize = _calcsize(format)
441 totalsize = _calcsize(format)
440 for key, value in metadata:
442 for key, value in metadata:
441 lk = len(key)
443 lk = len(key)
442 lv = len(value)
444 lv = len(value)
443 if lk > 255:
445 if lk > 255:
444 msg = (
446 msg = (
445 b'obsstore metadata key cannot be longer than 255 bytes'
447 b'obsstore metadata key cannot be longer than 255 bytes'
446 b' (key "%s" is %u bytes)'
448 b' (key "%s" is %u bytes)'
447 ) % (key, lk)
449 ) % (key, lk)
448 raise error.ProgrammingError(msg)
450 raise error.ProgrammingError(msg)
449 if lv > 255:
451 if lv > 255:
450 msg = (
452 msg = (
451 b'obsstore metadata value cannot be longer than 255 bytes'
453 b'obsstore metadata value cannot be longer than 255 bytes'
452 b' (value "%s" for key "%s" is %u bytes)'
454 b' (value "%s" for key "%s" is %u bytes)'
453 ) % (value, key, lv)
455 ) % (value, key, lv)
454 raise error.ProgrammingError(msg)
456 raise error.ProgrammingError(msg)
455 data.append(lk)
457 data.append(lk)
456 data.append(lv)
458 data.append(lv)
457 totalsize += lk + lv
459 totalsize += lk + lv
458 data[0] = totalsize
460 data[0] = totalsize
459 data = [_pack(format, *data)]
461 data = [_pack(format, *data)]
460 for key, value in metadata:
462 for key, value in metadata:
461 data.append(key)
463 data.append(key)
462 data.append(value)
464 data.append(value)
463 return b''.join(data)
465 return b''.join(data)
464
466
465
467
466 def _fm1readmarkers(data, off, stop):
468 def _fm1readmarkers(data, off, stop):
467 native = getattr(parsers, 'fm1readmarkers', None)
469 native = getattr(parsers, 'fm1readmarkers', None)
468 if not native:
470 if not native:
469 return _fm1purereadmarkers(data, off, stop)
471 return _fm1purereadmarkers(data, off, stop)
470 return native(data, off, stop)
472 return native(data, off, stop)
471
473
472
474
473 # mapping to read/write various marker formats
475 # mapping to read/write various marker formats
474 # <version> -> (decoder, encoder)
476 # <version> -> (decoder, encoder)
475 formats = {
477 formats = {
476 _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
478 _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
477 _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
479 _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
478 }
480 }
479
481
480
482
481 def _readmarkerversion(data):
483 def _readmarkerversion(data):
482 return _unpack(b'>B', data[0:1])[0]
484 return _unpack(b'>B', data[0:1])[0]
483
485
484
486
485 @util.nogc
487 @util.nogc
486 def _readmarkers(data, off=None, stop=None):
488 def _readmarkers(data, off=None, stop=None):
487 """Read and enumerate markers from raw data"""
489 """Read and enumerate markers from raw data"""
488 diskversion = _readmarkerversion(data)
490 diskversion = _readmarkerversion(data)
489 if not off:
491 if not off:
490 off = 1 # skip 1 byte version number
492 off = 1 # skip 1 byte version number
491 if stop is None:
493 if stop is None:
492 stop = len(data)
494 stop = len(data)
493 if diskversion not in formats:
495 if diskversion not in formats:
494 msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
496 msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
495 raise error.UnknownVersion(msg, version=diskversion)
497 raise error.UnknownVersion(msg, version=diskversion)
496 return diskversion, formats[diskversion][0](data, off, stop)
498 return diskversion, formats[diskversion][0](data, off, stop)
497
499
498
500
499 def encodeheader(version=_fm0version):
501 def encodeheader(version=_fm0version):
500 return _pack(b'>B', version)
502 return _pack(b'>B', version)
501
503
502
504
503 def encodemarkers(markers, addheader=False, version=_fm0version):
505 def encodemarkers(markers, addheader=False, version=_fm0version):
504 # Kept separate from flushmarkers(), it will be reused for
506 # Kept separate from flushmarkers(), it will be reused for
505 # markers exchange.
507 # markers exchange.
506 encodeone = formats[version][1]
508 encodeone = formats[version][1]
507 if addheader:
509 if addheader:
508 yield encodeheader(version)
510 yield encodeheader(version)
509 for marker in markers:
511 for marker in markers:
510 yield encodeone(marker)
512 yield encodeone(marker)
511
513
512
514
513 @util.nogc
515 @util.nogc
514 def _addsuccessors(successors, markers):
516 def _addsuccessors(successors, markers):
515 for mark in markers:
517 for mark in markers:
516 successors.setdefault(mark[0], set()).add(mark)
518 successors.setdefault(mark[0], set()).add(mark)
517
519
518
520
519 @util.nogc
521 @util.nogc
520 def _addpredecessors(predecessors, markers):
522 def _addpredecessors(predecessors, markers):
521 for mark in markers:
523 for mark in markers:
522 for suc in mark[1]:
524 for suc in mark[1]:
523 predecessors.setdefault(suc, set()).add(mark)
525 predecessors.setdefault(suc, set()).add(mark)
524
526
525
527
526 @util.nogc
528 @util.nogc
527 def _addchildren(children, markers):
529 def _addchildren(children, markers):
528 for mark in markers:
530 for mark in markers:
529 parents = mark[5]
531 parents = mark[5]
530 if parents is not None:
532 if parents is not None:
531 for p in parents:
533 for p in parents:
532 children.setdefault(p, set()).add(mark)
534 children.setdefault(p, set()).add(mark)
533
535
534
536
535 def _checkinvalidmarkers(markers):
537 def _checkinvalidmarkers(markers):
536 """search for marker with invalid data and raise error if needed
538 """search for marker with invalid data and raise error if needed
537
539
538 Exists as a separate function to allow the evolve extension a more
540 Exists as a separate function to allow the evolve extension a more
539 subtle handling.
541 subtle handling.
540 """
542 """
541 for mark in markers:
543 for mark in markers:
542 if node.nullid in mark[1]:
544 if node.nullid in mark[1]:
543 raise error.Abort(
545 raise error.Abort(
544 _(
546 _(
545 b'bad obsolescence marker detected: '
547 b'bad obsolescence marker detected: '
546 b'invalid successors nullid'
548 b'invalid successors nullid'
547 )
549 )
548 )
550 )
549
551
550
552
551 class obsstore(object):
553 class obsstore(object):
552 """Store obsolete markers
554 """Store obsolete markers
553
555
554 Markers can be accessed with three mappings:
556 Markers can be accessed with three mappings:
555 - predecessors[x] -> set(markers on predecessors edges of x)
557 - predecessors[x] -> set(markers on predecessors edges of x)
556 - successors[x] -> set(markers on successors edges of x)
558 - successors[x] -> set(markers on successors edges of x)
557 - children[x] -> set(markers on predecessors edges of children(x))
559 - children[x] -> set(markers on predecessors edges of children(x))
558 """
560 """
559
561
560 fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
562 fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
561 # prec: nodeid, predecessors changesets
563 # prec: nodeid, predecessors changesets
562 # succs: tuple of nodeid, successor changesets (0-N length)
564 # succs: tuple of nodeid, successor changesets (0-N length)
563 # flag: integer, flag field carrying modifier for the markers (see doc)
565 # flag: integer, flag field carrying modifier for the markers (see doc)
564 # meta: binary blob in UTF-8, encoded metadata dictionary
566 # meta: binary blob in UTF-8, encoded metadata dictionary
565 # date: (float, int) tuple, date of marker creation
567 # date: (float, int) tuple, date of marker creation
566 # parents: (tuple of nodeid) or None, parents of predecessors
568 # parents: (tuple of nodeid) or None, parents of predecessors
567 # None is used when no data has been recorded
569 # None is used when no data has been recorded
568
570
569 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
571 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
570 # caches for various obsolescence related cache
572 # caches for various obsolescence related cache
571 self.caches = {}
573 self.caches = {}
572 self.svfs = svfs
574 self.svfs = svfs
573 self._defaultformat = defaultformat
575 self._defaultformat = defaultformat
574 self._readonly = readonly
576 self._readonly = readonly
575
577
576 def __iter__(self):
578 def __iter__(self):
577 return iter(self._all)
579 return iter(self._all)
578
580
579 def __len__(self):
581 def __len__(self):
580 return len(self._all)
582 return len(self._all)
581
583
582 def __nonzero__(self):
584 def __nonzero__(self):
583 if not self._cached('_all'):
585 if not self._cached('_all'):
584 try:
586 try:
585 return self.svfs.stat(b'obsstore').st_size > 1
587 return self.svfs.stat(b'obsstore').st_size > 1
586 except OSError as inst:
588 except OSError as inst:
587 if inst.errno != errno.ENOENT:
589 if inst.errno != errno.ENOENT:
588 raise
590 raise
589 # just build an empty _all list if no obsstore exists, which
591 # just build an empty _all list if no obsstore exists, which
590 # avoids further stat() syscalls
592 # avoids further stat() syscalls
591 return bool(self._all)
593 return bool(self._all)
592
594
593 __bool__ = __nonzero__
595 __bool__ = __nonzero__
594
596
595 @property
597 @property
596 def readonly(self):
598 def readonly(self):
597 """True if marker creation is disabled
599 """True if marker creation is disabled
598
600
599 Remove me in the future when obsolete marker is always on."""
601 Remove me in the future when obsolete marker is always on."""
600 return self._readonly
602 return self._readonly
601
603
602 def create(
604 def create(
603 self,
605 self,
604 transaction,
606 transaction,
605 prec,
607 prec,
606 succs=(),
608 succs=(),
607 flag=0,
609 flag=0,
608 parents=None,
610 parents=None,
609 date=None,
611 date=None,
610 metadata=None,
612 metadata=None,
611 ui=None,
613 ui=None,
612 ):
614 ):
613 """obsolete: add a new obsolete marker
615 """obsolete: add a new obsolete marker
614
616
615 * ensuring it is hashable
617 * ensuring it is hashable
616 * check mandatory metadata
618 * check mandatory metadata
617 * encode metadata
619 * encode metadata
618
620
619 If you are a human writing code creating marker you want to use the
621 If you are a human writing code creating marker you want to use the
620 `createmarkers` function in this module instead.
622 `createmarkers` function in this module instead.
621
623
622 return True if a new marker has been added, False if the markers
624 return True if a new marker has been added, False if the markers
623 already existed (no op).
625 already existed (no op).
624 """
626 """
625 if metadata is None:
627 if metadata is None:
626 metadata = {}
628 metadata = {}
627 if date is None:
629 if date is None:
628 if b'date' in metadata:
630 if b'date' in metadata:
629 # as a courtesy for out-of-tree extensions
631 # as a courtesy for out-of-tree extensions
630 date = dateutil.parsedate(metadata.pop(b'date'))
632 date = dateutil.parsedate(metadata.pop(b'date'))
631 elif ui is not None:
633 elif ui is not None:
632 date = ui.configdate(b'devel', b'default-date')
634 date = ui.configdate(b'devel', b'default-date')
633 if date is None:
635 if date is None:
634 date = dateutil.makedate()
636 date = dateutil.makedate()
635 else:
637 else:
636 date = dateutil.makedate()
638 date = dateutil.makedate()
637 if len(prec) != 20:
639 if len(prec) != 20:
638 raise ValueError(prec)
640 raise ValueError(prec)
639 for succ in succs:
641 for succ in succs:
640 if len(succ) != 20:
642 if len(succ) != 20:
641 raise ValueError(succ)
643 raise ValueError(succ)
642 if prec in succs:
644 if prec in succs:
643 raise ValueError(
645 raise ValueError(
644 'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
646 'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
645 )
647 )
646
648
647 metadata = tuple(sorted(pycompat.iteritems(metadata)))
649 metadata = tuple(sorted(pycompat.iteritems(metadata)))
648 for k, v in metadata:
650 for k, v in metadata:
649 try:
651 try:
650 # might be better to reject non-ASCII keys
652 # might be better to reject non-ASCII keys
651 k.decode('utf-8')
653 k.decode('utf-8')
652 v.decode('utf-8')
654 v.decode('utf-8')
653 except UnicodeDecodeError:
655 except UnicodeDecodeError:
654 raise error.ProgrammingError(
656 raise error.ProgrammingError(
655 b'obsstore metadata must be valid UTF-8 sequence '
657 b'obsstore metadata must be valid UTF-8 sequence '
656 b'(key = %r, value = %r)'
658 b'(key = %r, value = %r)'
657 % (pycompat.bytestr(k), pycompat.bytestr(v))
659 % (pycompat.bytestr(k), pycompat.bytestr(v))
658 )
660 )
659
661
660 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
662 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
661 return bool(self.add(transaction, [marker]))
663 return bool(self.add(transaction, [marker]))
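
A hedged sketch of how create() is typically driven: the caller holds the repo lock and a transaction, and, as the docstring says, hand-written code would normally use the higher-level createmarkers() helper instead. The changeset contexts and the transaction name below are illustrative.

def record_amend_marker(repo, oldctx, newctx):
    # Illustrative only; real callers normally go through obsolete.createmarkers().
    with repo.lock(), repo.transaction(b'add-obsmarker') as tr:
        repo.obsstore.create(
            tr,
            prec=oldctx.node(),               # 20-byte predecessor node
            succs=(newctx.node(),),           # tuple of successor nodes
            metadata={b'user': repo.ui.username()},
            ui=repo.ui,
        )
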
662
664
663 def add(self, transaction, markers):
665 def add(self, transaction, markers):
664 """Add new markers to the store
666 """Add new markers to the store
665
667
666 Take care of filtering duplicates.
668 Take care of filtering duplicates.
667 Return the number of new markers."""
669 Return the number of new markers."""
668 if self._readonly:
670 if self._readonly:
669 raise error.Abort(
671 raise error.Abort(
670 _(b'creating obsolete markers is not enabled on this repo')
672 _(b'creating obsolete markers is not enabled on this repo')
671 )
673 )
672 known = set()
674 known = set()
673 getsuccessors = self.successors.get
675 getsuccessors = self.successors.get
674 new = []
676 new = []
675 for m in markers:
677 for m in markers:
676 if m not in getsuccessors(m[0], ()) and m not in known:
678 if m not in getsuccessors(m[0], ()) and m not in known:
677 known.add(m)
679 known.add(m)
678 new.append(m)
680 new.append(m)
679 if new:
681 if new:
680 f = self.svfs(b'obsstore', b'ab')
682 f = self.svfs(b'obsstore', b'ab')
681 try:
683 try:
682 offset = f.tell()
684 offset = f.tell()
683 transaction.add(b'obsstore', offset)
685 transaction.add(b'obsstore', offset)
684 # offset == 0: new file - add the version header
686 # offset == 0: new file - add the version header
685 data = b''.join(encodemarkers(new, offset == 0, self._version))
687 data = b''.join(encodemarkers(new, offset == 0, self._version))
686 f.write(data)
688 f.write(data)
687 finally:
689 finally:
688 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
690 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
689 # call 'filecacheentry.refresh()' here
691 # call 'filecacheentry.refresh()' here
690 f.close()
692 f.close()
691 addedmarkers = transaction.changes.get(b'obsmarkers')
693 addedmarkers = transaction.changes.get(b'obsmarkers')
692 if addedmarkers is not None:
694 if addedmarkers is not None:
693 addedmarkers.update(new)
695 addedmarkers.update(new)
694 self._addmarkers(new, data)
696 self._addmarkers(new, data)
695 # new markers *may* have changed several sets. invalidate the cache.
697 # new markers *may* have changed several sets. invalidate the cache.
696 self.caches.clear()
698 self.caches.clear()
697 # records the number of new markers for the transaction hooks
699 # records the number of new markers for the transaction hooks
698 previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
700 previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
699 transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
701 transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
700 return len(new)
702 return len(new)
701
703
702 def mergemarkers(self, transaction, data):
704 def mergemarkers(self, transaction, data):
703 """merge a binary stream of markers inside the obsstore
705 """merge a binary stream of markers inside the obsstore
704
706
705 Returns the number of new markers added."""
707 Returns the number of new markers added."""
706 version, markers = _readmarkers(data)
708 version, markers = _readmarkers(data)
707 return self.add(transaction, markers)
709 return self.add(transaction, markers)
708
710
709 @propertycache
711 @propertycache
710 def _data(self):
712 def _data(self):
711 return self.svfs.tryread(b'obsstore')
713 return self.svfs.tryread(b'obsstore')
712
714
713 @propertycache
715 @propertycache
714 def _version(self):
716 def _version(self):
715 if len(self._data) >= 1:
717 if len(self._data) >= 1:
716 return _readmarkerversion(self._data)
718 return _readmarkerversion(self._data)
717 else:
719 else:
718 return self._defaultformat
720 return self._defaultformat
719
721
720 @propertycache
722 @propertycache
721 def _all(self):
723 def _all(self):
722 data = self._data
724 data = self._data
723 if not data:
725 if not data:
724 return []
726 return []
725 self._version, markers = _readmarkers(data)
727 self._version, markers = _readmarkers(data)
726 markers = list(markers)
728 markers = list(markers)
727 _checkinvalidmarkers(markers)
729 _checkinvalidmarkers(markers)
728 return markers
730 return markers
729
731
730 @propertycache
732 @propertycache
731 def successors(self):
733 def successors(self):
732 successors = {}
734 successors = {}
733 _addsuccessors(successors, self._all)
735 _addsuccessors(successors, self._all)
734 return successors
736 return successors
735
737
736 @propertycache
738 @propertycache
737 def predecessors(self):
739 def predecessors(self):
738 predecessors = {}
740 predecessors = {}
739 _addpredecessors(predecessors, self._all)
741 _addpredecessors(predecessors, self._all)
740 return predecessors
742 return predecessors
741
743
742 @propertycache
744 @propertycache
743 def children(self):
745 def children(self):
744 children = {}
746 children = {}
745 _addchildren(children, self._all)
747 _addchildren(children, self._all)
746 return children
748 return children
747
749
748 def _cached(self, attr):
750 def _cached(self, attr):
749 return attr in self.__dict__
751 return attr in self.__dict__
750
752
751 def _addmarkers(self, markers, rawdata):
753 def _addmarkers(self, markers, rawdata):
752 markers = list(markers) # to allow repeated iteration
754 markers = list(markers) # to allow repeated iteration
753 self._data = self._data + rawdata
755 self._data = self._data + rawdata
754 self._all.extend(markers)
756 self._all.extend(markers)
755 if self._cached('successors'):
757 if self._cached('successors'):
756 _addsuccessors(self.successors, markers)
758 _addsuccessors(self.successors, markers)
757 if self._cached('predecessors'):
759 if self._cached('predecessors'):
758 _addpredecessors(self.predecessors, markers)
760 _addpredecessors(self.predecessors, markers)
759 if self._cached('children'):
761 if self._cached('children'):
760 _addchildren(self.children, markers)
762 _addchildren(self.children, markers)
761 _checkinvalidmarkers(markers)
763 _checkinvalidmarkers(markers)
762
764
763 def relevantmarkers(self, nodes):
765 def relevantmarkers(self, nodes):
764 """return a set of all obsolescence markers relevant to a set of nodes.
766 """return a set of all obsolescence markers relevant to a set of nodes.
765
767
766 "relevant" to a set of nodes mean:
768 "relevant" to a set of nodes mean:
767
769
768 - markers that use this changeset as a successor
770 - markers that use this changeset as a successor
769 - prune markers of direct children of this changeset
771 - prune markers of direct children of this changeset
770 - recursive application of the two rules on predecessors of these
772 - recursive application of the two rules on predecessors of these
771 markers
773 markers
772
774
773 It is a set so you cannot rely on order."""
775 It is a set so you cannot rely on order."""
774
776
775 pendingnodes = set(nodes)
777 pendingnodes = set(nodes)
776 seenmarkers = set()
778 seenmarkers = set()
777 seennodes = set(pendingnodes)
779 seennodes = set(pendingnodes)
778 precursorsmarkers = self.predecessors
780 precursorsmarkers = self.predecessors
779 succsmarkers = self.successors
781 succsmarkers = self.successors
780 children = self.children
782 children = self.children
781 while pendingnodes:
783 while pendingnodes:
782 direct = set()
784 direct = set()
783 for current in pendingnodes:
785 for current in pendingnodes:
784 direct.update(precursorsmarkers.get(current, ()))
786 direct.update(precursorsmarkers.get(current, ()))
785 pruned = [m for m in children.get(current, ()) if not m[1]]
787 pruned = [m for m in children.get(current, ()) if not m[1]]
786 direct.update(pruned)
788 direct.update(pruned)
787 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
789 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
788 direct.update(pruned)
790 direct.update(pruned)
789 direct -= seenmarkers
791 direct -= seenmarkers
790 pendingnodes = {m[0] for m in direct}
792 pendingnodes = {m[0] for m in direct}
791 seenmarkers |= direct
793 seenmarkers |= direct
792 pendingnodes -= seennodes
794 pendingnodes -= seennodes
793 seennodes |= pendingnodes
795 seennodes |= pendingnodes
794 return seenmarkers
796 return seenmarkers
795
797
796
798
797 def makestore(ui, repo):
799 def makestore(ui, repo):
798 """Create an obsstore instance from a repo."""
800 """Create an obsstore instance from a repo."""
799 # read default format for new obsstore.
801 # read default format for new obsstore.
800 # developer config: format.obsstore-version
802 # developer config: format.obsstore-version
801 defaultformat = ui.configint(b'format', b'obsstore-version')
803 defaultformat = ui.configint(b'format', b'obsstore-version')
802 # rely on obsstore class default when possible.
804 # rely on obsstore class default when possible.
803 kwargs = {}
805 kwargs = {}
804 if defaultformat is not None:
806 if defaultformat is not None:
805 kwargs['defaultformat'] = defaultformat
807 kwargs['defaultformat'] = defaultformat
806 readonly = not isenabled(repo, createmarkersopt)
808 readonly = not isenabled(repo, createmarkersopt)
807 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
809 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
808 if store and readonly:
810 if store and readonly:
809 ui.warn(
811 ui.warn(
810 _(b'obsolete feature not enabled but %i markers found!\n')
812 _(b'obsolete feature not enabled but %i markers found!\n')
811 % len(list(store))
813 % len(list(store))
812 )
814 )
813 return store
815 return store
814
816
815
817
816 def commonversion(versions):
818 def commonversion(versions):
817 """Return the newest version listed in both versions and our local formats.
819 """Return the newest version listed in both versions and our local formats.
818
820
819 Returns None if no common version exists.
821 Returns None if no common version exists.
820 """
822 """
821 versions.sort(reverse=True)
823 versions.sort(reverse=True)
822 # search for highest version known on both side
824 # search for highest version known on both side
823 for v in versions:
825 for v in versions:
824 if v in formats:
826 if v in formats:
825 return v
827 return v
826 return None
828 return None
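
Since the local formats mapping above only knows versions 0 and 1, commonversion() behaves as in this quick check (the peer version lists are made up, and the snippet assumes it runs in this module's namespace):

assert commonversion([2, 1, 0]) == 1   # newest version known on both sides wins
assert commonversion([5, 7]) is None   # no shared format version
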
827
829
828
830
829 # arbitrarily picked to fit into the 8K limit from the HTTP server
831 # arbitrarily picked to fit into the 8K limit from the HTTP server
830 # you have to take into account:
832 # you have to take into account:
831 # - the version header
833 # - the version header
832 # - the base85 encoding
834 # - the base85 encoding
833 _maxpayload = 5300
835 _maxpayload = 5300
834
836
835
837
836 def _pushkeyescape(markers):
838 def _pushkeyescape(markers):
837 """encode markers into a dict suitable for pushkey exchange
839 """encode markers into a dict suitable for pushkey exchange
838
840
839 - binary data is base85 encoded
841 - binary data is base85 encoded
840 - split in chunks smaller than 5300 bytes"""
842 - split in chunks smaller than 5300 bytes"""
841 keys = {}
843 keys = {}
842 parts = []
844 parts = []
843 currentlen = _maxpayload * 2 # ensure we create a new part
845 currentlen = _maxpayload * 2 # ensure we create a new part
844 for marker in markers:
846 for marker in markers:
845 nextdata = _fm0encodeonemarker(marker)
847 nextdata = _fm0encodeonemarker(marker)
846 if len(nextdata) + currentlen > _maxpayload:
848 if len(nextdata) + currentlen > _maxpayload:
847 currentpart = []
849 currentpart = []
848 currentlen = 0
850 currentlen = 0
849 parts.append(currentpart)
851 parts.append(currentpart)
850 currentpart.append(nextdata)
852 currentpart.append(nextdata)
851 currentlen += len(nextdata)
853 currentlen += len(nextdata)
852 for idx, part in enumerate(reversed(parts)):
854 for idx, part in enumerate(reversed(parts)):
853 data = b''.join([_pack(b'>B', _fm0version)] + part)
855 data = b''.join([_pack(b'>B', _fm0version)] + part)
854 keys[b'dump%i' % idx] = util.b85encode(data)
856 keys[b'dump%i' % idx] = util.b85encode(data)
855 return keys
857 return keys
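
The consumer of this encoding is pushmarker() further down; the sketch below, assumed to sit alongside the functions in this module, shows that each 'dumpN' value is base85 text whose decoded payload starts with the one-byte version header and can be fed straight back to _readmarkers().

def count_pushkey_markers(repo):
    # Illustrative: re-read every chunk produced by _pushkeyescape().
    keys = listmarkers(repo)
    total = 0
    for key in sorted(keys):
        data = util.b85decode(keys[key])
        version, markers = _readmarkers(data)   # first byte is the version header
        total += len(list(markers))
    return total
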
856
858
857
859
858 def listmarkers(repo):
860 def listmarkers(repo):
859 """List markers over pushkey"""
861 """List markers over pushkey"""
860 if not repo.obsstore:
862 if not repo.obsstore:
861 return {}
863 return {}
862 return _pushkeyescape(sorted(repo.obsstore))
864 return _pushkeyescape(sorted(repo.obsstore))
863
865
864
866
865 def pushmarker(repo, key, old, new):
867 def pushmarker(repo, key, old, new):
866 """Push markers over pushkey"""
868 """Push markers over pushkey"""
867 if not key.startswith(b'dump'):
869 if not key.startswith(b'dump'):
868 repo.ui.warn(_(b'unknown key: %r') % key)
870 repo.ui.warn(_(b'unknown key: %r') % key)
869 return False
871 return False
870 if old:
872 if old:
871 repo.ui.warn(_(b'unexpected old value for %r') % key)
873 repo.ui.warn(_(b'unexpected old value for %r') % key)
872 return False
874 return False
873 data = util.b85decode(new)
875 data = util.b85decode(new)
874 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
876 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
875 repo.obsstore.mergemarkers(tr, data)
877 repo.obsstore.mergemarkers(tr, data)
876 repo.invalidatevolatilesets()
878 repo.invalidatevolatilesets()
877 return True
879 return True
878
880
879
881
880 # mapping of 'set-name' -> <function to compute this set>
882 # mapping of 'set-name' -> <function to compute this set>
881 cachefuncs = {}
883 cachefuncs = {}
882
884
883
885
884 def cachefor(name):
886 def cachefor(name):
885 """Decorator to register a function as computing the cache for a set"""
887 """Decorator to register a function as computing the cache for a set"""
886
888
887 def decorator(func):
889 def decorator(func):
888 if name in cachefuncs:
890 if name in cachefuncs:
889 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
891 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
890 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
892 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
891 cachefuncs[name] = func
893 cachefuncs[name] = func
892 return func
894 return func
893
895
894 return decorator
896 return decorator
895
897
896
898
897 def getrevs(repo, name):
899 def getrevs(repo, name):
898 """Return the set of revision that belong to the <name> set
900 """Return the set of revision that belong to the <name> set
899
901
900 Such access may compute the set and cache it for future use"""
902 Such access may compute the set and cache it for future use"""
901 repo = repo.unfiltered()
903 repo = repo.unfiltered()
902 with util.timedcm('getrevs %s', name):
904 with util.timedcm('getrevs %s', name):
903 if not repo.obsstore:
905 if not repo.obsstore:
904 return frozenset()
906 return frozenset()
905 if name not in repo.obsstore.caches:
907 if name not in repo.obsstore.caches:
906 repo.obsstore.caches[name] = cachefuncs[name](repo)
908 repo.obsstore.caches[name] = cachefuncs[name](repo)
907 return repo.obsstore.caches[name]
909 return repo.obsstore.caches[name]
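
Extensions can plug into this registry the same way the built-in sets below do: decorate a compute function with @cachefor(name) and read it back through getrevs(). The set name and the computation here are hypothetical, shown only to illustrate the mechanism.

@cachefor(b'example-pruned')
def _computeexampleprunedset(repo):
    # Hypothetical set: obsolete revisions whose markers all record no successor.
    getnode = repo.changelog.node
    successors = repo.obsstore.successors
    pruned = set()
    for r in getrevs(repo, b'obsolete'):
        markers = successors.get(getnode(r), ())
        if markers and all(not m[1] for m in markers):
            pruned.add(r)
    return pruned

# usage: getrevs(repo, b'example-pruned') computes the set once, then caches it
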
908
910
909
911
910 # To keep it simple, we need to invalidate the obsolescence cache when:
912 # To keep it simple, we need to invalidate the obsolescence cache when:
911 #
913 #
912 # - a new changeset is added
914 # - a new changeset is added
913 # - public phase is changed
915 # - public phase is changed
914 # - obsolescence markers are added
916 # - obsolescence markers are added
915 # - strip is used on a repo
917 # - strip is used on a repo
916 def clearobscaches(repo):
918 def clearobscaches(repo):
917 """Remove all obsolescence related cache from a repo
919 """Remove all obsolescence related cache from a repo
918
920
919 This remove all cache in obsstore is the obsstore already exist on the
921 This remove all cache in obsstore is the obsstore already exist on the
920 repo.
922 repo.
921
923
922 (We could be smarter here given the exact event that trigger the cache
924 (We could be smarter here given the exact event that trigger the cache
923 clearing)"""
925 clearing)"""
924 # only clear cache is there is obsstore data in this repo
926 # only clear cache is there is obsstore data in this repo
925 if b'obsstore' in repo._filecache:
927 if b'obsstore' in repo._filecache:
926 repo.obsstore.caches.clear()
928 repo.obsstore.caches.clear()
927
929
928
930
929 def _mutablerevs(repo):
931 def _mutablerevs(repo):
930 """the set of mutable revision in the repository"""
932 """the set of mutable revision in the repository"""
931 return repo._phasecache.getrevset(repo, phases.mutablephases)
933 return repo._phasecache.getrevset(repo, phases.mutablephases)
932
934
933
935
934 @cachefor(b'obsolete')
936 @cachefor(b'obsolete')
935 def _computeobsoleteset(repo):
937 def _computeobsoleteset(repo):
936 """the set of obsolete revisions"""
938 """the set of obsolete revisions"""
937 getnode = repo.changelog.node
939 getnode = repo.changelog.node
938 notpublic = _mutablerevs(repo)
940 notpublic = _mutablerevs(repo)
939 isobs = repo.obsstore.successors.__contains__
941 isobs = repo.obsstore.successors.__contains__
940 obs = set(r for r in notpublic if isobs(getnode(r)))
942 obs = set(r for r in notpublic if isobs(getnode(r)))
941 return obs
943 return obs
942
944
943
945
944 @cachefor(b'orphan')
946 @cachefor(b'orphan')
945 def _computeorphanset(repo):
947 def _computeorphanset(repo):
946 """the set of non obsolete revisions with obsolete parents"""
948 """the set of non obsolete revisions with obsolete parents"""
947 pfunc = repo.changelog.parentrevs
949 pfunc = repo.changelog.parentrevs
948 mutable = _mutablerevs(repo)
950 mutable = _mutablerevs(repo)
949 obsolete = getrevs(repo, b'obsolete')
951 obsolete = getrevs(repo, b'obsolete')
950 others = mutable - obsolete
952 others = mutable - obsolete
951 unstable = set()
953 unstable = set()
952 for r in sorted(others):
954 for r in sorted(others):
953 # A rev is unstable if one of its parents is obsolete or unstable;
955 # A rev is unstable if one of its parents is obsolete or unstable;
954 # this works since we traverse in growing rev order
956 # this works since we traverse in growing rev order
955 for p in pfunc(r):
957 for p in pfunc(r):
956 if p in obsolete or p in unstable:
958 if p in obsolete or p in unstable:
957 unstable.add(r)
959 unstable.add(r)
958 break
960 break
959 return unstable
961 return unstable
960
962
961
963
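A standalone toy of the propagation rule in the loop above, independent of any repository; the revision graph and obsolete set here are invented purely for illustration:

    parents = {0: (), 1: (0,), 2: (1,), 3: (2,), 4: (0,)}  # rev -> parent revs
    obsolete_set = {1}
    unstable = set()
    for r in sorted(parents):           # growing rev order, as in _computeorphanset
        if any(p in obsolete_set or p in unstable for p in parents[r]):
            unstable.add(r)
    assert unstable == {2, 3}           # descendants of the obsolete rev; rev 4 is unaffected
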
962 @cachefor(b'suspended')
964 @cachefor(b'suspended')
963 def _computesuspendedset(repo):
965 def _computesuspendedset(repo):
964 """the set of obsolete parents with non obsolete descendants"""
966 """the set of obsolete parents with non obsolete descendants"""
965 suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
967 suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
966 return set(r for r in getrevs(repo, b'obsolete') if r in suspended)
968 return set(r for r in getrevs(repo, b'obsolete') if r in suspended)
967
969
968
970
969 @cachefor(b'extinct')
971 @cachefor(b'extinct')
970 def _computeextinctset(repo):
972 def _computeextinctset(repo):
971 """the set of obsolete parents without non obsolete descendants"""
973 """the set of obsolete parents without non obsolete descendants"""
972 return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
974 return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
973
975
974
976
975 @cachefor(b'phasedivergent')
977 @cachefor(b'phasedivergent')
976 def _computephasedivergentset(repo):
978 def _computephasedivergentset(repo):
977 """the set of revs trying to obsolete public revisions"""
979 """the set of revs trying to obsolete public revisions"""
978 bumped = set()
980 bumped = set()
979 # util function (avoid attribute lookup in the loop)
981 # util function (avoid attribute lookup in the loop)
980 phase = repo._phasecache.phase # would be faster to grab the full list
982 phase = repo._phasecache.phase # would be faster to grab the full list
981 public = phases.public
983 public = phases.public
982 cl = repo.changelog
984 cl = repo.changelog
983 torev = cl.index.get_rev
985 torev = cl.index.get_rev
984 tonode = cl.node
986 tonode = cl.node
985 obsstore = repo.obsstore
987 obsstore = repo.obsstore
986 for rev in repo.revs(b'(not public()) and (not obsolete())'):
988 for rev in repo.revs(b'(not public()) and (not obsolete())'):
987 # We only evaluate mutable, non-obsolete revisions
989 # We only evaluate mutable, non-obsolete revisions
988 node = tonode(rev)
990 node = tonode(rev)
989 # (future) A cache of predecessors may be worth it if split is very common
991 # (future) A cache of predecessors may be worth it if split is very common
990 for pnode in obsutil.allpredecessors(
992 for pnode in obsutil.allpredecessors(
991 obsstore, [node], ignoreflags=bumpedfix
993 obsstore, [node], ignoreflags=bumpedfix
992 ):
994 ):
993 prev = torev(pnode) # unfiltered! but so is phasecache
995 prev = torev(pnode) # unfiltered! but so is phasecache
994 if (prev is not None) and (phase(repo, prev) <= public):
996 if (prev is not None) and (phase(repo, prev) <= public):
995 # we have a public predecessor
997 # we have a public predecessor
996 bumped.add(rev)
998 bumped.add(rev)
997 break # Next draft!
999 break # Next draft!
998 return bumped
1000 return bumped
999
1001
1000
1002
1001 @cachefor(b'contentdivergent')
1003 @cachefor(b'contentdivergent')
1002 def _computecontentdivergentset(repo):
1004 def _computecontentdivergentset(repo):
1003 """the set of rev that compete to be the final successors of some revision.
1005 """the set of rev that compete to be the final successors of some revision.
1004 """
1006 """
1005 divergent = set()
1007 divergent = set()
1006 obsstore = repo.obsstore
1008 obsstore = repo.obsstore
1007 newermap = {}
1009 newermap = {}
1008 tonode = repo.changelog.node
1010 tonode = repo.changelog.node
1009 for rev in repo.revs(b'(not public()) - obsolete()'):
1011 for rev in repo.revs(b'(not public()) - obsolete()'):
1010 node = tonode(rev)
1012 node = tonode(rev)
1011 mark = obsstore.predecessors.get(node, ())
1013 mark = obsstore.predecessors.get(node, ())
1012 toprocess = set(mark)
1014 toprocess = set(mark)
1013 seen = set()
1015 seen = set()
1014 while toprocess:
1016 while toprocess:
1015 prec = toprocess.pop()[0]
1017 prec = toprocess.pop()[0]
1016 if prec in seen:
1018 if prec in seen:
1017 continue # emergency cycle hanging prevention
1019 continue # emergency cycle hanging prevention
1018 seen.add(prec)
1020 seen.add(prec)
1019 if prec not in newermap:
1021 if prec not in newermap:
1020 obsutil.successorssets(repo, prec, cache=newermap)
1022 obsutil.successorssets(repo, prec, cache=newermap)
1021 newer = [n for n in newermap[prec] if n]
1023 newer = [n for n in newermap[prec] if n]
1022 if len(newer) > 1:
1024 if len(newer) > 1:
1023 divergent.add(rev)
1025 divergent.add(rev)
1024 break
1026 break
1025 toprocess.update(obsstore.predecessors.get(prec, ()))
1027 toprocess.update(obsstore.predecessors.get(prec, ()))
1026 return divergent
1028 return divergent
1027
1029
1028
1030
1029 def makefoldid(relation, user):
1031 def makefoldid(relation, user):
1030
1032
1031 folddigest = hashlib.sha1(user)
1033 folddigest = hashutil.sha1(user)
1032 for p in relation[0] + relation[1]:
1034 for p in relation[0] + relation[1]:
1033 folddigest.update(b'%d' % p.rev())
1035 folddigest.update(b'%d' % p.rev())
1034 folddigest.update(p.node())
1036 folddigest.update(p.node())
1035 # Since a fold only has to compete against other folds for the same successors,
1037 # Since a fold only has to compete against other folds for the same successors,
1036 # it seems fine to use a small ID. Smaller IDs save space.
1038 # it seems fine to use a small ID. Smaller IDs save space.
1037 return node.hex(folddigest.digest())[:8]
1039 return node.hex(folddigest.digest())[:8]
1038
1040
1039
1041
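A self-contained sketch of the fold-id derivation above, with hashlib standing in for hashutil.sha1; the user string, revision numbers, and node bytes are made up:

    import hashlib

    def toy_foldid(user, revs_and_nodes):
        d = hashlib.sha1(user)
        for rev, nodebytes in revs_and_nodes:
            d.update(b'%d' % rev)
            d.update(nodebytes)
        return d.hexdigest()[:8]        # short id, as in makefoldid

    print(toy_foldid(b'alice', [(3, b'\x11' * 20), (4, b'\x22' * 20)]))
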
1040 def createmarkers(
1042 def createmarkers(
1041 repo, relations, flag=0, date=None, metadata=None, operation=None
1043 repo, relations, flag=0, date=None, metadata=None, operation=None
1042 ):
1044 ):
1043 """Add obsolete markers between changesets in a repo
1045 """Add obsolete markers between changesets in a repo
1044
1046
1045 <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
1047 <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
1046 tuples. `old` and `news` are changectx. metadata is an optional dictionary
1048 tuples. `old` and `news` are changectx. metadata is an optional dictionary
1047 containing metadata for this marker only. It is merged with the global
1049 containing metadata for this marker only. It is merged with the global
1048 metadata specified through the `metadata` argument of this function.
1050 metadata specified through the `metadata` argument of this function.
1049 Any string values in metadata must be UTF-8 bytes.
1051 Any string values in metadata must be UTF-8 bytes.
1050
1052
1051 Trying to obsolete a public changeset will raise an exception.
1053 Trying to obsolete a public changeset will raise an exception.
1052
1054
1053 Current user and date are used except if specified otherwise in the
1055 Current user and date are used except if specified otherwise in the
1054 metadata attribute.
1056 metadata attribute.
1055
1057
1056 This function operates within a transaction of its own, but does
1058 This function operates within a transaction of its own, but does
1057 not take any lock on the repo.
1059 not take any lock on the repo.
1058 """
1060 """
1059 # prepare metadata
1061 # prepare metadata
1060 if metadata is None:
1062 if metadata is None:
1061 metadata = {}
1063 metadata = {}
1062 if b'user' not in metadata:
1064 if b'user' not in metadata:
1063 luser = (
1065 luser = (
1064 repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
1066 repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
1065 )
1067 )
1066 metadata[b'user'] = encoding.fromlocal(luser)
1068 metadata[b'user'] = encoding.fromlocal(luser)
1067
1069
1068 # Operation metadata handling
1070 # Operation metadata handling
1069 useoperation = repo.ui.configbool(
1071 useoperation = repo.ui.configbool(
1070 b'experimental', b'evolution.track-operation'
1072 b'experimental', b'evolution.track-operation'
1071 )
1073 )
1072 if useoperation and operation:
1074 if useoperation and operation:
1073 metadata[b'operation'] = operation
1075 metadata[b'operation'] = operation
1074
1076
1075 # Effect flag metadata handling
1077 # Effect flag metadata handling
1076 saveeffectflag = repo.ui.configbool(
1078 saveeffectflag = repo.ui.configbool(
1077 b'experimental', b'evolution.effect-flags'
1079 b'experimental', b'evolution.effect-flags'
1078 )
1080 )
1079
1081
1080 with repo.transaction(b'add-obsolescence-marker') as tr:
1082 with repo.transaction(b'add-obsolescence-marker') as tr:
1081 markerargs = []
1083 markerargs = []
1082 for rel in relations:
1084 for rel in relations:
1083 predecessors = rel[0]
1085 predecessors = rel[0]
1084 if not isinstance(predecessors, tuple):
1086 if not isinstance(predecessors, tuple):
1085 # preserve compat with old API until all callers are migrated
1087 # preserve compat with old API until all callers are migrated
1086 predecessors = (predecessors,)
1088 predecessors = (predecessors,)
1087 if len(predecessors) > 1 and len(rel[1]) != 1:
1089 if len(predecessors) > 1 and len(rel[1]) != 1:
1089 msg = b'Fold markers can only have 1 successor, not %d'
1091 msg = b'Fold markers can only have 1 successor, not %d'
1089 raise error.ProgrammingError(msg % len(rel[1]))
1091 raise error.ProgrammingError(msg % len(rel[1]))
1090 foldid = None
1092 foldid = None
1091 foldsize = len(predecessors)
1093 foldsize = len(predecessors)
1092 if 1 < foldsize:
1094 if 1 < foldsize:
1093 foldid = makefoldid(rel, metadata[b'user'])
1095 foldid = makefoldid(rel, metadata[b'user'])
1094 for foldidx, prec in enumerate(predecessors, 1):
1096 for foldidx, prec in enumerate(predecessors, 1):
1095 sucs = rel[1]
1097 sucs = rel[1]
1096 localmetadata = metadata.copy()
1098 localmetadata = metadata.copy()
1097 if len(rel) > 2:
1099 if len(rel) > 2:
1098 localmetadata.update(rel[2])
1100 localmetadata.update(rel[2])
1099 if foldid is not None:
1101 if foldid is not None:
1100 localmetadata[b'fold-id'] = foldid
1102 localmetadata[b'fold-id'] = foldid
1101 localmetadata[b'fold-idx'] = b'%d' % foldidx
1103 localmetadata[b'fold-idx'] = b'%d' % foldidx
1102 localmetadata[b'fold-size'] = b'%d' % foldsize
1104 localmetadata[b'fold-size'] = b'%d' % foldsize
1103
1105
1104 if not prec.mutable():
1106 if not prec.mutable():
1105 raise error.Abort(
1107 raise error.Abort(
1106 _(b"cannot obsolete public changeset: %s") % prec,
1108 _(b"cannot obsolete public changeset: %s") % prec,
1107 hint=b"see 'hg help phases' for details",
1109 hint=b"see 'hg help phases' for details",
1108 )
1110 )
1109 nprec = prec.node()
1111 nprec = prec.node()
1110 nsucs = tuple(s.node() for s in sucs)
1112 nsucs = tuple(s.node() for s in sucs)
1111 npare = None
1113 npare = None
1112 if not nsucs:
1114 if not nsucs:
1113 npare = tuple(p.node() for p in prec.parents())
1115 npare = tuple(p.node() for p in prec.parents())
1114 if nprec in nsucs:
1116 if nprec in nsucs:
1115 raise error.Abort(
1117 raise error.Abort(
1116 _(b"changeset %s cannot obsolete itself") % prec
1118 _(b"changeset %s cannot obsolete itself") % prec
1117 )
1119 )
1118
1120
1119 # Effect flag can be different by relation
1121 # Effect flag can be different by relation
1120 if saveeffectflag:
1122 if saveeffectflag:
1121 # The effect flag is saved in a versioned field name for
1123 # The effect flag is saved in a versioned field name for
1122 # future evolution
1124 # future evolution
1123 effectflag = obsutil.geteffectflag(prec, sucs)
1125 effectflag = obsutil.geteffectflag(prec, sucs)
1124 localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag
1126 localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag
1125
1127
1126 # Creating the marker causes the hidden cache to become
1128 # Creating the marker causes the hidden cache to become
1127 # invalid, which causes recomputation when we ask for
1129 # invalid, which causes recomputation when we ask for
1128 # prec.parents() above, resulting in n^2 behavior. So let's
1130 # prec.parents() above, resulting in n^2 behavior. So let's
1129 # prepare all of the args first, then create the markers.
1131 # prepare all of the args first, then create the markers.
1130 markerargs.append((nprec, nsucs, npare, localmetadata))
1132 markerargs.append((nprec, nsucs, npare, localmetadata))
1131
1133
1132 for args in markerargs:
1134 for args in markerargs:
1133 nprec, nsucs, npare, localmetadata = args
1135 nprec, nsucs, npare, localmetadata = args
1134 repo.obsstore.create(
1136 repo.obsstore.create(
1135 tr,
1137 tr,
1136 nprec,
1138 nprec,
1137 nsucs,
1139 nsucs,
1138 flag,
1140 flag,
1139 parents=npare,
1141 parents=npare,
1140 date=date,
1142 date=date,
1141 metadata=localmetadata,
1143 metadata=localmetadata,
1142 ui=repo.ui,
1144 ui=repo.ui,
1143 )
1145 )
1144 repo.filteredrevcache.clear()
1146 repo.filteredrevcache.clear()
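A hedged example of calling the function above, assuming `repo` is a locked localrepository and `old`/`new` are changectx objects for the rewritten changeset and its replacement; the metadata key is illustrative only:

    from mercurial import obsolete

    obsolete.createmarkers(
        repo,
        [((old,), (new,))],                  # one predecessor, one successor
        operation=b'amend',                  # stored when evolution.track-operation is enabled
        metadata={b'note': b'rewritten during review'},
    )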