peer: have a common constructor and use it...
marmoute
r50646:a6e2a668 default
@@ -1,642 +1,642 @@
# httppeer.py - HTTP repository proxy classes for mercurial
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import errno
import io
import os
import socket
import struct

from concurrent import futures
from .i18n import _
from .pycompat import getattr
from . import (
    bundle2,
    error,
    httpconnection,
    pycompat,
    statichttprepo,
    url as urlmod,
    util,
    wireprotov1peer,
)
from .utils import urlutil

httplib = util.httplib
urlerr = util.urlerr
urlreq = util.urlreq


def encodevalueinheaders(value, header, limit):
    """Encode a string value into multiple HTTP headers.

    ``value`` will be encoded into 1 or more HTTP headers with the names
    ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
    name + value will be at most ``limit`` bytes long.

    Returns an iterable of 2-tuples consisting of header names and
    values as native strings.
    """
    # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
    # not bytes. This function always takes bytes in as arguments.
    fmt = pycompat.strurl(header) + r'-%s'
    # Note: it is *NOT* a bug that the last bit here is a bytestring
    # and not a unicode: we're just getting the encoded length anyway,
    # and using an r-string to make it portable between Python 2 and 3
    # doesn't work because then the \r is a literal backslash-r
    # instead of a carriage return.
    valuelen = limit - len(fmt % '000') - len(b': \r\n')
    result = []

    n = 0
    for i in range(0, len(value), valuelen):
        n += 1
        result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen])))

    return result


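To make the splitting concrete, here is a small standalone sketch of the same logic (no Mercurial imports; the header name and the 30-byte limit are illustrative):

    # Standalone sketch of the header-splitting math above; values are made up.
    def split_value(value, header, limit):
        fmt = header + '-%s'
        # space left for the value once 'X-HgArg-NNN: ' and CRLF are counted
        valuelen = limit - len(fmt % '000') - len(': \r\n')
        return [
            (fmt % (i // valuelen + 1), value[i : i + valuelen].decode('ascii'))
            for i in range(0, len(value), valuelen)
        ]

    parts = split_value(b'cmd=between&pairs=abcdef', 'X-HgArg', 30)
    # -> [('X-HgArg-1', 'cmd=between&pai'), ('X-HgArg-2', 'rs=abcdef')]
    # A receiver rebuilds the value by concatenating parts in numeric order.
    assert ''.join(v for _, v in parts) == 'cmd=between&pairs=abcdef'
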
class _multifile:
    def __init__(self, *fileobjs):
        for f in fileobjs:
            if not util.safehasattr(f, b'length'):
                raise ValueError(
                    b'_multifile only supports file objects that '
                    b'have a length but this one does not:',
                    type(f),
                    f,
                )
        self._fileobjs = fileobjs
        self._index = 0

    @property
    def length(self):
        return sum(f.length for f in self._fileobjs)

    def read(self, amt=None):
        if amt <= 0:
            return b''.join(f.read() for f in self._fileobjs)
        parts = []
        while amt and self._index < len(self._fileobjs):
            parts.append(self._fileobjs[self._index].read(amt))
            got = len(parts[-1])
            if got < amt:
                self._index += 1
            amt -= got
        return b''.join(parts)

    def seek(self, offset, whence=os.SEEK_SET):
        if whence != os.SEEK_SET:
            raise NotImplementedError(
                b'_multifile does not support anything other'
                b' than os.SEEK_SET for whence on seek()'
            )
        if offset != 0:
            raise NotImplementedError(
                b'_multifile only supports seeking to start, but that '
                b'could be fixed if you need it'
            )
        for f in self._fileobjs:
            f.seek(0)
        self._index = 0


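A sketch of how `_multifile` gets used below: it lets the urlencoded arguments and the request body travel as one seekable payload with a known total length. The byte strings here are made up, `length` is set manually because the constructor requires it, and the snippet assumes it runs where the class above is available:

    import io, os

    args = io.BytesIO(b'cmd=unbundle&heads=%3B')
    args.length = 22                     # required attribute
    body = io.BytesIO(b'<bundle payload>')
    body.length = 16

    mf = _multifile(args, body)
    assert mf.length == 38               # becomes the Content-Length
    first = mf.read(30)                  # spans both underlying files
    rest = mf.read(30)
    assert first + rest == args.getvalue() + body.getvalue()
    mf.seek(0, os.SEEK_SET)              # only a rewind to offset 0 is supported
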
def makev1commandrequest(
    ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.
    """
    if cmd == b'pushkey':
        args[b'data'] = b''
    data = args.pop(b'data', None)
    headers = args.pop(b'headers', {})

    ui.debug(b"sending %s command\n" % cmd)
    q = [(b'cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and b'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers['X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != b'capabilities'
        httpheader = capablefn(b'httpheader')
        if httpheader:
            headersize = int(httpheader.split(b',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encoded_args = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(
                encoded_args, b'X-HgArg', headersize
            ):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = b'?%s' % urlreq.urlencode(q)
    cu = b"%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, b'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn(b'httpmediatype')
        if mt:
            protoparams.add(b'0.1')
            mediatypes = set(mt.split(b','))

        protoparams.add(b'partial-pull')

    if b'0.2tx' in mediatypes:
        protoparams.add(b'0.2')

    if b'0.2tx' in mediatypes and capablefn(b'compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [
            e.wireprotosupport().name
            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
        ]
        protoparams.add(b'comp=%s' % b','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(
            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
        )
        for header, value in protoheaders:
            headers[header] = value

    varyheaders = []
    for header in headers:
        if header.lower().startswith('x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers['Vary'] = ','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug(b"sending %d bytes\n" % size)
        req.add_unredirected_header('Content-Length', '%d' % size)

    return req, cu, qs


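For a sense of what the negotiation above puts on the wire: against a server advertising `0.2tx` media types and the `compression` capability, the client ends up sending an `X-HgProto-<N>` header like the following (the engine list is illustrative; it depends on which engines a build supports):

    protoparams = {b'0.1', b'0.2', b'comp=zstd,zlib,none,bzip2', b'partial-pull'}
    value = b' '.join(sorted(protoparams))
    # -> b'0.1 0.2 comp=zstd,zlib,none,bzip2 partial-pull'
    # sent as:  X-HgProto-1: 0.1 0.2 comp=zstd,zlib,none,bzip2 partial-pull
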
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.
    """
    dbg = ui.debug
    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
        line = b'devel-peer-request: %s\n'
        dbg(
            line
            % b'%s %s'
            % (
                pycompat.bytesurl(req.get_method()),
                pycompat.bytesurl(req.get_full_url()),
            )
        )
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith(b'X-hgarg-'):
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % b'  %s %s' % (header, value))

        if hgargssize is not None:
            dbg(
                line
                % b'  %d bytes of commands arguments in headers'
                % hgargssize
            )
        data = req.data
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % b'  %d bytes of data' % length)

    start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_(b'authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug(
            b'http error requesting %s\n'
            % urlutil.hidepassword(req.get_full_url())
        )
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            code = res.code if res else -1
            dbg(
                line
                % b'  finished in %.4f seconds (%d)'
                % (util.timer() - start, code)
            )

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res


class RedirectedRepoError(error.RepoError):
    def __init__(self, msg, respurl):
        super(RedirectedRepoError, self).__init__(msg)
        self.respurl = respurl


def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[: -len(qs)]
        qsdropped = False
    else:
        qsdropped = True

    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_(b'real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
    except AttributeError:
        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))

    safeurl = urlutil.hidepassword(baseurl)
    if proto.startswith(b'application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith(b'application/mercurial-'):
        ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
        msg = _(
            b"'%s' does not appear to be an hg repository:\n"
            b"---%%<--- (%s)\n%s\n---%%<---\n"
        ) % (safeurl, proto or b'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        subtype = proto.split(b'-', 1)[1]

        version_info = tuple([int(n) for n in subtype.split(b'.')])
    except ValueError:
        raise error.RepoError(
            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
        )

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines[b'zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(
            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
        )

    return respurl, proto, resp


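The `application/mercurial-0.2` framing handled above is a one-byte length followed by the compression engine name; a standalone sketch of the decode step (payload bytes are made up):

    import io
    import struct

    payload = io.BytesIO(b'\x04zstd' + b'<compressed frames follow>')
    elen = struct.unpack(b'B', payload.read(1))[0]  # engine-name length (4)
    ename = payload.read(elen)                      # b'zstd'
    assert ename == b'zstd'
    # the rest of the stream is handed to that engine's decompressor
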
class httppeer(wireprotov1peer.wirepeer):
    def __init__(self, ui, path, url, opener, requestbuilder, caps):
-        self.ui = ui
+        super().__init__(ui)
        self._path = path
        self._url = url
        self._caps = caps
        self.limitedarguments = caps is not None and b'httppostargs' not in caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self._path

    def local(self):
        return None

    def canpush(self):
        return True

    def close(self):
        try:
            reqs, sent, recv = (
                self._urlopener.requestscount,
                self._urlopener.sentbytescount,
                self._urlopener.receivedbytescount,
            )
        except AttributeError:
            return
        self.ui.note(
            _(
                b'(sent %d HTTP requests and %d bytes; '
                b'received %d bytes in responses)\n'
            )
            % (reqs, sent, recv)
        )

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(
            self.ui,
            self._requestbuilder,
            self._caps,
            self.capable,
            self._url,
            cmd,
            args,
        )

        resp = sendrequest(self.ui, self._urlopener, req)

        self._url, ct, resp = parsev1commandresponse(
            self.ui, self._url, cu, qs, resp, _compressible
        )

        return resp

    def _call(self, cmd, **args):
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable(b'unbundle')
        try:
            types = types.split(b',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [b""]
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split(b'\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_(b"unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_(b'push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            with os.fdopen(fd, "wb") as fh:
                d = fp.read(4096)
                while d:
                    fh.write(d)
                    d = fp.read(4096)
            # start http push
            with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
                headers = {'Content-Type': 'application/mercurial-0.1'}
                return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if filename is not None:
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception


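The `super().__init__(ui)` line above is the heart of this changeset: peers now share one constructor that owns the `self.ui` assignment instead of each class setting it independently. A minimal sketch of the pattern (class names here are illustrative; the real base lives under `wireprotov1peer.wirepeer` and its parents):

    class basepeer:
        def __init__(self, ui):
            # the single, shared place where peer construction stores ui
            self.ui = ui

    class examplepeer(basepeer):
        def __init__(self, ui, url):
            super().__init__(ui)  # common constructor handles shared setup
            self._url = url
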
class queuedcommandfuture(futures.Future):
    """Wraps result() on command futures to trigger submission on call."""

    def result(self, timeout=None):
        if self.done():
            return futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # sendcommands() will restore the original __class__ and self.result
        # will resolve to Future.result.
        return self.result(timeout)


def performhandshake(ui, url, opener, requestbuilder):
    # The handshake is a request to the capabilities command.

    caps = None

    def capable(x):
        raise error.ProgrammingError(b'should not be called')

    args = {}

    req, requrl, qs = makev1commandrequest(
        ui, requestbuilder, caps, capable, url, b'capabilities', args
    )
    resp = sendrequest(ui, opener, req)

    # The server may redirect us to the repo root, stripping the
    # ?cmd=capabilities query string from the URL. The server would likely
    # return HTML in this case and ``parsev1commandresponse()`` would raise.
    # We catch this special case and re-issue the capabilities request against
    # the new URL.
    #
    # We should ideally not do this, as a redirect that drops the query
    # string from the URL is arguably a server bug. (Garbage in, garbage out).
    # However, Mercurial clients for several years appeared to handle this
    # issue without behavior degradation. And according to issue 5860, it may
    # be a longstanding bug in some server implementations. So we allow a
    # redirect that drops the query string to "just work."
    try:
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(
            ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
        )
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith(b'application/mercurial-'):
        raise error.ProgrammingError(b'unexpected content-type: %s' % ct)

    info = {b'v1capabilities': set(rawdata.split())}

    return respurl, info


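The handshake result is nothing more than the space-separated capability list returned by `?cmd=capabilities`; for example (the capability names below are typical, not exhaustive):

    rawdata = b'lookup branchmap known getbundle unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024'
    info = {b'v1capabilities': set(rawdata.split())}
    assert b'branchmap' in info[b'v1capabilities']
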
def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
    """Construct an appropriate HTTP peer instance.

    ``opener`` is an ``url.opener`` that should be used to establish
    connections, perform HTTP requests.

    ``requestbuilder`` is the type used for constructing HTTP requests.
    It exists as an argument so extensions can override the default.
    """
    u = urlutil.url(path)
    if u.query or u.fragment:
        raise error.Abort(
            _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
        )

    # urllib cannot handle URLs with embedded user or passwd.
    url, authinfo = u.authinfo()
    ui.debug(b'using %s\n' % url)

    opener = opener or urlmod.opener(ui, authinfo)

    respurl, info = performhandshake(ui, url, opener, requestbuilder)

    return httppeer(
        ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
    )


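A hypothetical use, assuming a loaded `ui` instance and a reachable server (the URL is made up); because the handshake runs inside `makepeer`, a bad URL fails here rather than on the first command:

    from mercurial import ui as uimod

    myui = uimod.ui.load()
    peer = makepeer(myui, b'https://hg.example.com/repo')
    if peer.capable(b'lookup'):
        node = peer.lookup(b'tip')  # binary node of the remote tip
    peer.close()
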
def make_peer(ui, path, create, intents=None, createopts=None):
    if create:
        raise error.Abort(_(b'cannot create new http repository'))
    try:
        if path.startswith(b'https:') and not urlmod.has_https:
            raise error.Abort(
                _(b'Python support for SSL and HTTPS is not installed')
            )

        inst = makepeer(ui, path)

        return inst
    except error.RepoError as httpexception:
        try:
            r = statichttprepo.make_peer(ui, b"static-" + path, create)
            ui.note(_(b'(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception  # use the original http RepoError instead
@@ -1,2053 +1,2059 @@
# repository.py - Interfaces and base classes for repositories and peers.
# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


from ..i18n import _
from .. import error
from . import util as interfaceutil

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Repository supports (at least) some sidedata to be stored
REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)

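These are bit flags, so consumers combine, test, and validate them with bitwise arithmetic; a short sketch:

    flags = REVISION_FLAG_CENSORED | REVISION_FLAG_HASCOPIESINFO

    if flags & REVISION_FLAG_CENSORED:
        pass  # the revision's content was censored

    # storage code rejects any bit outside the known set
    assert not (flags & ~REVISION_FLAGS_KNOWN)
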
CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'


## Cache related constants:
#
# Used to control which cache should be warmed in a repo.updatecaches(…) call.

# Warm branchmaps for all known repoview filter-levels
CACHE_BRANCHMAP_ALL = b"branchmap-all"
# Warm branchmaps for the repoview filter-level used by the server
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"
# Warm internal manifestlog cache (eg: persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
# Warm rev branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"
# Warm tags' cache for the default repoview
CACHE_TAGS_DEFAULT = b"tags-default"
# Warm tags' cache for the repoview filter-level used by the server
CACHE_TAGS_SERVED = b"tags-served"

# the caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
CACHES_DEFAULT = {
    CACHE_BRANCHMAP_SERVED,
}

# the caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
CACHES_ALL = {
    CACHE_BRANCHMAP_SERVED,
    CACHE_BRANCHMAP_ALL,
    CACHE_CHANGELOG_CACHE,
    CACHE_FILE_NODE_TAGS,
    CACHE_FULL_MANIFEST,
    CACHE_MANIFESTLOG_CACHE,
    CACHE_TAGS_DEFAULT,
    CACHE_TAGS_SERVED,
}

# the caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
CACHES_POST_CLONE = CACHES_ALL.copy()
CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)


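Per the comment above, these sets feed `repo.updatecaches(…)`; a hedged sketch of the intended use (it assumes a `repo` object in scope, and that `updatecaches` accepts a `caches` keyword as in recent Mercurial):

    # after a clone, warm everything except the expensive file-node-tags cache
    repo.updatecaches(caches=CACHES_POST_CLONE)

    # being mutable sets, extensions can register their own cache names
    CACHES_DEFAULT.add(b'my-extension-cache')
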
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """


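In practice these methods are called on a concrete peer object; a hedged sketch of a few of them (`peer` as returned by a factory such as `makepeer` in httppeer.py above):

    heads = peer.heads()                     # iterable of binary head nodes
    bookmarks = peer.listkeys(b'bookmarks')  # keys in a pushkey namespace
    membership = peer.known(heads)           # booleans, one per queried node
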
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass


class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
337
337
338
338
339 class ipeerrequests(interfaceutil.Interface):
339 class ipeerrequests(interfaceutil.Interface):
340 """Interface for executing commands on a peer."""
340 """Interface for executing commands on a peer."""
341
341
342 limitedarguments = interfaceutil.Attribute(
342 limitedarguments = interfaceutil.Attribute(
343 """True if the peer cannot receive large argument value for commands."""
343 """True if the peer cannot receive large argument value for commands."""
344 )
344 )
345
345
346 def commandexecutor():
346 def commandexecutor():
347 """A context manager that resolves to an ipeercommandexecutor.
347 """A context manager that resolves to an ipeercommandexecutor.
348
348
349 The object this resolves to can be used to issue command requests
349 The object this resolves to can be used to issue command requests
350 to the peer.
350 to the peer.
351
351
352 Callers should call its ``callcommand`` method to issue command
352 Callers should call its ``callcommand`` method to issue command
353 requests.
353 requests.
354
354
355 A new executor should be obtained for each distinct set of commands
355 A new executor should be obtained for each distinct set of commands
356 (possibly just a single command) that the consumer wants to execute
356 (possibly just a single command) that the consumer wants to execute
357 as part of a single operation or round trip. This is because some
357 as part of a single operation or round trip. This is because some
358 peers are half-duplex and/or don't support persistent connections.
358 peers are half-duplex and/or don't support persistent connections.
359 e.g. in the case of HTTP peers, commands sent to an executor represent
359 e.g. in the case of HTTP peers, commands sent to an executor represent
360 a single HTTP request. While some peers may support multiple command
360 a single HTTP request. While some peers may support multiple command
361 sends over the wire per executor, consumers need to code to the least
361 sends over the wire per executor, consumers need to code to the least
362 capable peer. So it should be assumed that command executors buffer
362 capable peer. So it should be assumed that command executors buffer
363 called commands until they are told to send them and that each
363 called commands until they are told to send them and that each
364 command executor could result in a new connection or wire-level request
364 command executor could result in a new connection or wire-level request
365 being issued.
365 being issued.
366 """
366 """
367
367
368
368
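# An illustrative sketch, not part of the interface definitions: issuing
# two commands through a single executor obtained from an object that
# conforms to ``ipeerrequests``. The command names and the ``peer``
# variable are assumptions for illustration. Per ``callcommand()`` above,
# ``result()`` is only called after every command has been requested.
#
#   with peer.commandexecutor() as e:
#       f_heads = e.callcommand(b'heads', {})
#       f_known = e.callcommand(b'known', {b'nodes': []})
#   # Exiting the context manager waits on all outstanding requests.
#   heads = f_heads.result()
#   known = f_known.result()
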
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """


class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API."""
    )


@interfaceutil.implementer(ipeerbase)
class peer:
    """Base class for peer repositories."""

    limitedarguments = False

    def __init__(
        self,
        ui,
    ):
        self.ui = ui

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )

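# An illustrative sketch (the capability names are assumptions):
# ``capable()`` returns True for a bare capability, the encoded value for
# a ``name=value`` capability, and False when the capability is absent.
#
#   if peer.capable(b'branchmap'):
#       ...  # bare capability advertised by the server
#   bundle2 = peer.capable(b'bundle2')  # value after 'bundle2=' or False
#   peer.requirecap(b'lookup', b'look up remote revisions')
#   # requirecap() raises error.CapabilityError when unsupported.
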
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem."""
    )

    error = interfaceutil.Attribute("""Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """
    )


class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute("""20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision."""
    )

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision."""
    )

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to."""
    )

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """
    )

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """
    )

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """
    )

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node."""
    )

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """
    )

    sidedata = interfaceutil.Attribute(
        """Raw sidedata bytes for the given revision."""
    )

    protocol_flags = interfaceutil.Attribute(
        """Single byte of integer flags that can influence the protocol.

        This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
        """
    )

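# An illustrative sketch of consuming an ``irevisiondelta``; the helper
# names are hypothetical. ``revision`` and ``delta`` are mutually
# exclusive, and ``basenode == nullid`` marks a fulltext entry.
#
#   if rev_delta.delta is not None:
#       fulltext = apply_bdiff(base_fulltext, rev_delta.delta)
#   else:
#       fulltext = rev_delta.revision
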
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """
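# An illustrative sketch of unpacking the 8-tuple documented above; the
# local names are for readability only. The 64-bit first field packs the
# offset into the upper 6 bytes and the flags into the lower 2 bytes.
#
#   (offset_flags, comp_size, uncomp_size, base_rev,
#    link_rev, p1_rev, p2_rev, node) = index[rev]
#   offset = offset_flags >> 16
#   flags = offset_flags & 0xFFFF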

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""


class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid = interfaceutil.Attribute(
        """node for the null revision for use as delta base."""
    )

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

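# An illustrative sketch of common ``ifileindex`` queries; ``fl`` is
# assumed to be any conforming store.
#
#   r = fl.rev(node)            # raises error.LookupError if unknown
#   assert fl.node(r) == node   # the reverse mapping
#   p1, p2 = fl.parents(node)   # ``nullid`` for an absent parent
#   if fl.hasnode(node):
#       ...                     # only full binary nodes return True
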
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(node):
        """Obtain raw data for a node."""

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """

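# An illustrative sketch of consuming ``emitrevisions()``; ``store`` and
# ``nodes`` are assumptions. With default ordering, a delta base emitted
# by the generator is always available before its descendants.
#
#   for rd in store.emitrevisions(nodes, revisiondata=True):
#       if rd.delta is not None:
#           ...  # apply ``rd.delta`` against ``rd.basenode``
#       else:
#           ...  # use the fulltext in ``rd.revision``
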
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store and return its number.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

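# An illustrative sketch of the ``deltas`` argument shape for
# ``addgroup()`` as documented above; all values are placeholders.
#
#   deltas = [
#       (node, p1, p2, linknode, deltabase, delta, flags),
#   ]
#   store.addgroup(deltas, linkmapper, tr)
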
    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """


class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
            Iterable of (vfs, path) describing files that are exclusively
            used to back storage for this tracked path.

        sharedfiles
            Iterable of (vfs, path) describing files that are used to back
            storage for this tracked path. Those files may also provide storage
            for other stored entities.

        revisionscount
            Number of revisions available for retrieval.

        trackedsize
            Total size in bytes of all tracked revisions. This is a sum of the
            length of the fulltext of all revisions.

        storedsize
            Total size in bytes used to store data for all tracked revisions.
            This is commonly less than ``trackedsize`` due to internal usage
            of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

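# An illustrative sketch of querying ``storageinfo()``; the byte-string
# key type is an assumption of this sketch. Unsupported queries are
# reported as ``None``.
#
#   info = fl.storageinfo(revisionscount=True, trackedsize=True)
#   revcount = info[b'revisionscount']  # may be None
#   tracked = info[b'trackedsize']      # may be None
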
    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

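# An illustrative sketch of reading ``diff()`` output; how a side missing
# a path is represented is an assumption here (commonly a null entry).
#
#   for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
#       ...  # (node1, flag1) from this manifest, (node2, flag2) from m2
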
1074 def setflag(path, flag):
1080 def setflag(path, flag):
1075 """Set the flag value for a given path.
1081 """Set the flag value for a given path.
1076
1082
1077 Raises ``KeyError`` if the path is not already in the manifest.
1083 Raises ``KeyError`` if the path is not already in the manifest.
1078 """
1084 """
1079
1085
1080 def get(path, default=None):
1086 def get(path, default=None):
1081 """Obtain the node value for a path or a default value if missing."""
1087 """Obtain the node value for a path or a default value if missing."""
1082
1088
1083 def flags(path):
1089 def flags(path):
1084 """Return the flags value for a path (default: empty bytestring)."""
1090 """Return the flags value for a path (default: empty bytestring)."""
1085
1091
1086 def copy():
1092 def copy():
1087 """Return a copy of this manifest."""
1093 """Return a copy of this manifest."""
1088
1094
1089 def items():
1095 def items():
1090 """Returns an iterable of (path, node) for items in this manifest."""
1096 """Returns an iterable of (path, node) for items in this manifest."""
1091
1097
1092 def iteritems():
1098 def iteritems():
1093 """Identical to items()."""
1099 """Identical to items()."""
1094
1100
1095 def iterentries():
1101 def iterentries():
1096 """Returns an iterable of (path, node, flags) for this manifest.
1102 """Returns an iterable of (path, node, flags) for this manifest.
1097
1103
1098 Similar to ``iteritems()`` except items are a 3-tuple and include
1104 Similar to ``iteritems()`` except items are a 3-tuple and include
1099 flags.
1105 flags.
1100 """
1106 """
1101
1107
1102 def text():
1108 def text():
1103 """Obtain the raw data representation for this manifest.
1109 """Obtain the raw data representation for this manifest.
1104
1110
1105 Result is used to create a manifest revision.
1111 Result is used to create a manifest revision.
1106 """
1112 """
1107
1113
1108 def fastdelta(base, changes):
1114 def fastdelta(base, changes):
1109 """Obtain a delta between this manifest and another given changes.
1115 """Obtain a delta between this manifest and another given changes.
1110
1116
1111 ``base`` in the raw data representation for another manifest.
1117 ``base`` in the raw data representation for another manifest.
1112
1118
1113 ``changes`` is an iterable of ``(path, to_delete)``.
1119 ``changes`` is an iterable of ``(path, to_delete)``.
1114
1120
1115 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1121 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1116 delta between ``base`` and this manifest.
1122 delta between ``base`` and this manifest.
1117
1123
1118 If this manifest implementation can't support ``fastdelta()``,
1124 If this manifest implementation can't support ``fastdelta()``,
1119 raise ``mercurial.manifest.FastdeltaUnavailable``.
1125 raise ``mercurial.manifest.FastdeltaUnavailable``.
1120 """
1126 """
1121
1127
1122
1128
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


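# Hedged sketch, not part of the interface: reading a stored manifest
# revision via ``readfast()``. ``mctx`` is assumed to conform to
# ``imanifestrevisionstored``.
def _example_manifest_paths(mctx):
    # readfast() lets the storage pick whichever of read()/readdelta() is
    # cheaper; both return an object conforming to ``imanifestdict``.
    md = mctx.readfast()
    return sorted(md)

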
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


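# Hedged sketch, not part of the interface: persisting a writable manifest
# revision inside an open transaction. The path and node arguments here are
# hypothetical placeholders.
def _example_write_manifest(wmctx, tr, linkrev, p1node, p2node):
    added = [b'dir/newfile']    # paths added relative to the parents
    removed = [b'dir/oldfile']  # paths removed relative to the parents
    # write() persists the revision and returns its binary node.
    return wmctx.write(tr, linkrev, p1node, p2node, added, removed)

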
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


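# Hedged sketch, not part of the interface: walking an ``imanifeststorage``
# and reporting each revision's node, parents, and linked changelog revision,
# using only methods defined on the interface above.
def _example_dump_manifest_storage(store):
    for rev in store:
        node = store.node(rev)
        p1, p2 = store.parents(node)
        yield rev, node, p1, p2, store.linkrev(rev)

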
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    nodeconstants = interfaceutil.Attribute(
        """nodeconstants used by the current repository."""
    )

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the used storage."""


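# Hedged sketch, not part of the interface: resolving a root manifest through
# ``repo.manifestlog``. ``node`` is the binary node of the root manifest.
def _example_read_root_manifest(repo, node):
    mfl = repo.manifestlog
    # ``mfl[node]`` is equivalent to ``mfl.get(b'', node)`` and returns an
    # object conforming to ``imanifestrevisionstored``.
    mctx = mfl[node]
    return mctx.read()  # an ``imanifestdict``

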
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


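# Hedged sketch, not part of the interface: obtaining file storage for a
# tracked path. Assumes the returned ``ifilestorage`` supports ``len()`` for
# its revision count, as revlog-backed filelogs do; the path is hypothetical.
def _example_filelog_revision_count(repo, path=b'README'):
    fl = repo.file(path)
    return len(fl)

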
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    nodeconstants = interfaceutil.Attribute(
        """Constant nodes matching the hash function used by the repository."""
    )
    nullid = interfaceutil.Attribute(
        """null revision for the hash function used by the repository."""
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to working copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

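    # Hedged usage note: ``revs()`` and ``set()`` evaluate the same revset
    # expression language and differ only in what they emit, e.g. (revset
    # arguments assumed):
    #
    #   for rev in repo.revs(b'heads(%s)', b'default'):
    #       ...  # integer revision numbers
    #   for ctx in repo.set(b'heads(%s)', b'default'):
    #       ...  # changectx instances
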
    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def register_changeset(rev, changelogrevision):
        """Extension point for caches for new nodes.

        Multiple consumers are expected to need parts of the changelogrevision,
        so it is provided as an optimization to avoid duplicate lookups. A
        simple cache would be fragile when other revisions are accessed, too."""
        pass

    def branchtip(branch, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False, caches=None):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""
    def checkcommitpatterns(wctx, match, status, fail):
        pass

    def commit(
        text=b'',
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False, origctx=None):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        """Convenience method to call repo[x].status()."""
    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass

    def register_sidedata_computer(
        kind, category, keys, computer, flags, replace=False
    ):
        pass

    def register_wanted_sidedata(category):
        pass


class completelocalrepository(
    ilocalrepositorymain, ilocalrepositoryfilestorage
):
    """Complete interface for a local repository."""


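# Hedged sketch, not part of the interface: checking whether an object
# declares the complete local repository interface. Assumes ``interfaceutil``
# wraps zope.interface, whose ``Interface`` objects expose ``providedBy()``.
def _example_is_complete_local_repo(repo):
    return completelocalrepository.providedBy(repo)

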
class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regards
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

    Cache *key* derivation can be influenced by the instance.

    Cache keys are initially derived by a deterministic representation of
    the command request. This includes the command name, arguments, protocol
    version, etc. This initial key derivation is performed by CBOR-encoding a
    data structure and feeding that output into a hasher.

    Instances of this interface can influence this initial key derivation
    via ``adjustcachekeystate()``.

    The instance is informed of the derived cache key via a call to
    ``setcachekey()``. The instance must store the key locally so it can
    be consulted on subsequent operations that may require it.

    When constructed, the instance has access to a callable that can be used
    for encoding response objects. This callable receives as its single
    argument an object emitted by a command function. It returns an iterable
    of bytes chunks representing the encoded object. Unless the cacher is
    caching native Python objects in memory or has a way of reconstructing
    the original Python objects, implementations typically call this function
    to produce bytes from the output objects and then store those bytes in
    the cache. When it comes time to re-emit those bytes, they are wrapped
    in a ``wireprototypes.encodedresponse`` instance to tell the output
    layer that they are pre-encoded.

    When receiving the objects emitted by the command function, instances
    can choose what to do with those objects. The simplest thing to do is
    re-emit the original objects. They will be forwarded to the output
    layer and will be processed as if the cacher did not exist.

    Implementations could also choose to not emit objects - instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In
    this way, the implementation would function as a filtering layer of
    sorts.

    When caching objects, typically the encoded form of the object will
    be stored. Keep in mind that if the original object is forwarded to
    the output layer, it will need to be encoded there as well. For large
    output, this redundant encoding could add overhead. Implementations
    could wrap the encoded object data in ``wireprototypes.encodedresponse``
    instances to avoid this overhead.
    """

    def __enter__():
        """Marks the instance as active.

        Should return self.
        """

    def __exit__(exctype, excvalue, exctb):
        """Called when cacher is no longer used.

        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
        """

    def adjustcachekeystate(state):
        """Influences cache key derivation by adjusting state to derive key.

        A dict defining the state used to derive the cache key is passed.

        Implementations can modify this dict to record additional state that
        is wanted to influence key derivation.

        Implementations are *highly* encouraged to not modify or delete
        existing keys.
        """

    def setcachekey(key):
        """Record the derived cache key for this request.

        Instances may mutate the key for internal usage, as desired. e.g.
        instances may wish to prepend the repo name, introduce path
        components for filesystem or URL addressing, etc. Behavior is up to
        the cache.

        Returns a bool indicating if the request is cacheable by this
        instance.
        """

    def lookup():
        """Attempt to resolve an entry in the cache.

        The instance is instructed to look for the cache key that it was
        informed about via the call to ``setcachekey()``.

        If there's no cache hit or the cacher doesn't wish to use the cached
        entry, ``None`` should be returned.

        Else, a dict defining the cached result should be returned. The
        dict may have the following keys:

        objs
           An iterable of objects that should be sent to the client. That
           iterable of objects is expected to be what the command function
           would return if invoked or an equivalent representation thereof.
        """

    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """
@@ -1,3977 +1,3976 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

99 release = lockmod.release
99 release = lockmod.release
100 urlerr = util.urlerr
100 urlerr = util.urlerr
101 urlreq = util.urlreq
101 urlreq = util.urlreq
102
102
103 # set of (path, vfs-location) tuples. vfs-location is:
103 # set of (path, vfs-location) tuples. vfs-location is:
104 # - 'plain' for vfs relative paths
104 # - 'plain' for vfs relative paths
105 # - '' for svfs relative paths
105 # - '' for svfs relative paths
106 _cachedfiles = set()
106 _cachedfiles = set()
107
107
108
108
109 class _basefilecache(scmutil.filecache):
109 class _basefilecache(scmutil.filecache):
110 """All filecache usage on repo are done for logic that should be unfiltered"""
110 """All filecache usage on repo are done for logic that should be unfiltered"""
111
111
112 def __get__(self, repo, type=None):
112 def __get__(self, repo, type=None):
113 if repo is None:
113 if repo is None:
114 return self
114 return self
115 # proxy to unfiltered __dict__ since filtered repo has no entry
115 # proxy to unfiltered __dict__ since filtered repo has no entry
116 unfi = repo.unfiltered()
116 unfi = repo.unfiltered()
117 try:
117 try:
118 return unfi.__dict__[self.sname]
118 return unfi.__dict__[self.sname]
119 except KeyError:
119 except KeyError:
120 pass
120 pass
121 return super(_basefilecache, self).__get__(unfi, type)
121 return super(_basefilecache, self).__get__(unfi, type)
122
122
123 def set(self, repo, value):
123 def set(self, repo, value):
124 return super(_basefilecache, self).set(repo.unfiltered(), value)
124 return super(_basefilecache, self).set(repo.unfiltered(), value)
125
125
126
126
127 class repofilecache(_basefilecache):
127 class repofilecache(_basefilecache):
128 """filecache for files in .hg but outside of .hg/store"""
128 """filecache for files in .hg but outside of .hg/store"""
129
129
130 def __init__(self, *paths):
130 def __init__(self, *paths):
131 super(repofilecache, self).__init__(*paths)
131 super(repofilecache, self).__init__(*paths)
132 for path in paths:
132 for path in paths:
133 _cachedfiles.add((path, b'plain'))
133 _cachedfiles.add((path, b'plain'))
134
134
135 def join(self, obj, fname):
135 def join(self, obj, fname):
136 return obj.vfs.join(fname)
136 return obj.vfs.join(fname)
137
137
138
138
139 class storecache(_basefilecache):
139 class storecache(_basefilecache):
140 """filecache for files in the store"""
140 """filecache for files in the store"""
141
141
142 def __init__(self, *paths):
142 def __init__(self, *paths):
143 super(storecache, self).__init__(*paths)
143 super(storecache, self).__init__(*paths)
144 for path in paths:
144 for path in paths:
145 _cachedfiles.add((path, b''))
145 _cachedfiles.add((path, b''))
146
146
147 def join(self, obj, fname):
147 def join(self, obj, fname):
148 return obj.sjoin(fname)
148 return obj.sjoin(fname)
149
149
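As a usage sketch (mirroring how these descriptors are applied to the repository class later in this module; the example class is illustrative), each property is tied to the on-disk files whose changes should invalidate it:

class _examplerepo:  # illustrative only
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        # recomputed only when .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)

    @storecache(b'phaseroots')
    def _phasecache(self):
        # recomputed only when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)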
150
150
151 class changelogcache(storecache):
151 class changelogcache(storecache):
152 """filecache for the changelog"""
152 """filecache for the changelog"""
153
153
154 def __init__(self):
154 def __init__(self):
155 super(changelogcache, self).__init__()
155 super(changelogcache, self).__init__()
156 _cachedfiles.add((b'00changelog.i', b''))
156 _cachedfiles.add((b'00changelog.i', b''))
157 _cachedfiles.add((b'00changelog.n', b''))
157 _cachedfiles.add((b'00changelog.n', b''))
158
158
159 def tracked_paths(self, obj):
159 def tracked_paths(self, obj):
160 paths = [self.join(obj, b'00changelog.i')]
160 paths = [self.join(obj, b'00changelog.i')]
161 if obj.store.opener.options.get(b'persistent-nodemap', False):
161 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 paths.append(self.join(obj, b'00changelog.n'))
162 paths.append(self.join(obj, b'00changelog.n'))
163 return paths
163 return paths
164
164
165
165
166 class manifestlogcache(storecache):
166 class manifestlogcache(storecache):
167 """filecache for the manifestlog"""
167 """filecache for the manifestlog"""
168
168
169 def __init__(self):
169 def __init__(self):
170 super(manifestlogcache, self).__init__()
170 super(manifestlogcache, self).__init__()
171 _cachedfiles.add((b'00manifest.i', b''))
171 _cachedfiles.add((b'00manifest.i', b''))
172 _cachedfiles.add((b'00manifest.n', b''))
172 _cachedfiles.add((b'00manifest.n', b''))
173
173
174 def tracked_paths(self, obj):
174 def tracked_paths(self, obj):
175 paths = [self.join(obj, b'00manifest.i')]
175 paths = [self.join(obj, b'00manifest.i')]
176 if obj.store.opener.options.get(b'persistent-nodemap', False):
176 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 paths.append(self.join(obj, b'00manifest.n'))
177 paths.append(self.join(obj, b'00manifest.n'))
178 return paths
178 return paths
179
179
180
180
181 class mixedrepostorecache(_basefilecache):
181 class mixedrepostorecache(_basefilecache):
182 """filecache for a mix files in .hg/store and outside"""
182 """filecache for a mix files in .hg/store and outside"""
183
183
184 def __init__(self, *pathsandlocations):
184 def __init__(self, *pathsandlocations):
185 # scmutil.filecache only uses the path for passing back into our
185 # scmutil.filecache only uses the path for passing back into our
186 # join(), so we can safely pass a list of paths and locations
186 # join(), so we can safely pass a list of paths and locations
187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
187 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 _cachedfiles.update(pathsandlocations)
188 _cachedfiles.update(pathsandlocations)
189
189
190 def join(self, obj, fnameandlocation):
190 def join(self, obj, fnameandlocation):
191 fname, location = fnameandlocation
191 fname, location = fnameandlocation
192 if location == b'plain':
192 if location == b'plain':
193 return obj.vfs.join(fname)
193 return obj.vfs.join(fname)
194 else:
194 else:
195 if location != b'':
195 if location != b'':
196 raise error.ProgrammingError(
196 raise error.ProgrammingError(
197 b'unexpected location: %s' % location
197 b'unexpected location: %s' % location
198 )
198 )
199 return obj.sjoin(fname)
199 return obj.sjoin(fname)
200
200
201
201
202 def isfilecached(repo, name):
202 def isfilecached(repo, name):
203 """check if a repo has already cached "name" filecache-ed property
203 """check if a repo has already cached "name" filecache-ed property
204
204
205 This returns (cachedobj-or-None, iscached) tuple.
205 This returns (cachedobj-or-None, iscached) tuple.
206 """
206 """
207 cacheentry = repo.unfiltered()._filecache.get(name, None)
207 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 if not cacheentry:
208 if not cacheentry:
209 return None, False
209 return None, False
210 return cacheentry.obj, True
210 return cacheentry.obj, True
211
211
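A hedged usage sketch: this helper lets callers peek at a filecache-ed property without forcing a potentially expensive computation (``repo`` and ``ui`` are assumed to be in scope):

bm, cached = isfilecached(repo, b'_bookmarks')
if cached:
    # the property was already computed, so inspecting it is cheap
    ui.note(b'%d bookmarks already loaded\n' % len(bm))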
212
212
213 class unfilteredpropertycache(util.propertycache):
213 class unfilteredpropertycache(util.propertycache):
214 """propertycache that apply to unfiltered repo only"""
214 """propertycache that apply to unfiltered repo only"""
215
215
216 def __get__(self, repo, type=None):
216 def __get__(self, repo, type=None):
217 unfi = repo.unfiltered()
217 unfi = repo.unfiltered()
218 if unfi is repo:
218 if unfi is repo:
219 return super(unfilteredpropertycache, self).__get__(unfi)
219 return super(unfilteredpropertycache, self).__get__(unfi)
220 return getattr(unfi, self.name)
220 return getattr(unfi, self.name)
221
221
222
222
223 class filteredpropertycache(util.propertycache):
223 class filteredpropertycache(util.propertycache):
224 """propertycache that must take filtering in account"""
224 """propertycache that must take filtering in account"""
225
225
226 def cachevalue(self, obj, value):
226 def cachevalue(self, obj, value):
227 object.__setattr__(obj, self.name, value)
227 object.__setattr__(obj, self.name, value)
228
228
229
229
230 def hasunfilteredcache(repo, name):
230 def hasunfilteredcache(repo, name):
231 """check if a repo has an unfilteredpropertycache value for <name>"""
231 """check if a repo has an unfilteredpropertycache value for <name>"""
232 return name in vars(repo.unfiltered())
232 return name in vars(repo.unfiltered())
233
233
234
234
235 def unfilteredmethod(orig):
235 def unfilteredmethod(orig):
236 """decorate method that always need to be run on unfiltered version"""
236 """decorate method that always need to be run on unfiltered version"""
237
237
238 @functools.wraps(orig)
238 @functools.wraps(orig)
239 def wrapper(repo, *args, **kwargs):
239 def wrapper(repo, *args, **kwargs):
240 return orig(repo.unfiltered(), *args, **kwargs)
240 return orig(repo.unfiltered(), *args, **kwargs)
241
241
242 return wrapper
242 return wrapper
243
243
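A sketch of the intended use (the example class is illustrative): history-mutating operations must see the unfiltered repository even when invoked on a filtered view:

class _examplerepo:  # illustrative only
    @unfilteredmethod
    def destroyed(self):
        # `self` is always the unfiltered repository here, even if the
        # caller held a view such as repo.filtered(b'visible')
        pass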
244
244
245 moderncaps = {
245 moderncaps = {
246 b'lookup',
246 b'lookup',
247 b'branchmap',
247 b'branchmap',
248 b'pushkey',
248 b'pushkey',
249 b'known',
249 b'known',
250 b'getbundle',
250 b'getbundle',
251 b'unbundle',
251 b'unbundle',
252 }
252 }
253 legacycaps = moderncaps.union({b'changegroupsubset'})
253 legacycaps = moderncaps.union({b'changegroupsubset'})
254
254
255
255
256 @interfaceutil.implementer(repository.ipeercommandexecutor)
256 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 class localcommandexecutor:
257 class localcommandexecutor:
258 def __init__(self, peer):
258 def __init__(self, peer):
259 self._peer = peer
259 self._peer = peer
260 self._sent = False
260 self._sent = False
261 self._closed = False
261 self._closed = False
262
262
263 def __enter__(self):
263 def __enter__(self):
264 return self
264 return self
265
265
266 def __exit__(self, exctype, excvalue, exctb):
266 def __exit__(self, exctype, excvalue, exctb):
267 self.close()
267 self.close()
268
268
269 def callcommand(self, command, args):
269 def callcommand(self, command, args):
270 if self._sent:
270 if self._sent:
271 raise error.ProgrammingError(
271 raise error.ProgrammingError(
272 b'callcommand() cannot be used after sendcommands()'
272 b'callcommand() cannot be used after sendcommands()'
273 )
273 )
274
274
275 if self._closed:
275 if self._closed:
276 raise error.ProgrammingError(
276 raise error.ProgrammingError(
277 b'callcommand() cannot be used after close()'
277 b'callcommand() cannot be used after close()'
278 )
278 )
279
279
280 # We don't need to support anything fancy. Just call the named
280 # We don't need to support anything fancy. Just call the named
281 # method on the peer and return a resolved future.
281 # method on the peer and return a resolved future.
282 fn = getattr(self._peer, pycompat.sysstr(command))
282 fn = getattr(self._peer, pycompat.sysstr(command))
283
283
284 f = futures.Future()
284 f = futures.Future()
285
285
286 try:
286 try:
287 result = fn(**pycompat.strkwargs(args))
287 result = fn(**pycompat.strkwargs(args))
288 except Exception:
288 except Exception:
289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
289 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 else:
290 else:
291 f.set_result(result)
291 f.set_result(result)
292
292
293 return f
293 return f
294
294
295 def sendcommands(self):
295 def sendcommands(self):
296 self._sent = True
296 self._sent = True
297
297
298 def close(self):
298 def close(self):
299 self._closed = True
299 self._closed = True
300
300
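A hedged usage sketch of the executor protocol defined by ``ipeercommandexecutor`` (``peer`` is assumed to be an existing local peer): commands are requested via ``callcommand()``, flushed with ``sendcommands()``, and results read from the returned futures:

with localcommandexecutor(peer) as executor:
    fheads = executor.callcommand(b'heads', {})
    executor.sendcommands()
    # for this local executor the future is already resolved
    heads = fheads.result()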
301
301
302 @interfaceutil.implementer(repository.ipeercommands)
302 @interfaceutil.implementer(repository.ipeercommands)
303 class localpeer(repository.peer):
303 class localpeer(repository.peer):
304 '''peer for a local repo; reflects only the most recent API'''
304 '''peer for a local repo; reflects only the most recent API'''
305
305
306 def __init__(self, repo, caps=None):
306 def __init__(self, repo, caps=None):
307 super(localpeer, self).__init__()
307 super(localpeer, self).__init__(repo.ui)
308
308
309 if caps is None:
309 if caps is None:
310 caps = moderncaps.copy()
310 caps = moderncaps.copy()
311 self._repo = repo.filtered(b'served')
311 self._repo = repo.filtered(b'served')
312 self.ui = repo.ui
313
312
314 if repo._wanted_sidedata:
313 if repo._wanted_sidedata:
315 formatted = bundle2.format_remote_wanted_sidedata(repo)
314 formatted = bundle2.format_remote_wanted_sidedata(repo)
316 caps.add(b'exp-wanted-sidedata=' + formatted)
315 caps.add(b'exp-wanted-sidedata=' + formatted)
317
316
318 self._caps = repo._restrictcapabilities(caps)
317 self._caps = repo._restrictcapabilities(caps)
319
318
320 # Begin of _basepeer interface.
319 # Begin of _basepeer interface.
321
320
322 def url(self):
321 def url(self):
323 return self._repo.url()
322 return self._repo.url()
324
323
325 def local(self):
324 def local(self):
326 return self._repo
325 return self._repo
327
326
328 def canpush(self):
327 def canpush(self):
329 return True
328 return True
330
329
331 def close(self):
330 def close(self):
332 self._repo.close()
331 self._repo.close()
333
332
334 # End of _basepeer interface.
333 # End of _basepeer interface.
335
334
336 # Begin of _basewirecommands interface.
335 # Begin of _basewirecommands interface.
337
336
338 def branchmap(self):
337 def branchmap(self):
339 return self._repo.branchmap()
338 return self._repo.branchmap()
340
339
341 def capabilities(self):
340 def capabilities(self):
342 return self._caps
341 return self._caps
343
342
344 def clonebundles(self):
343 def clonebundles(self):
345 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
344 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
346
345
347 def debugwireargs(self, one, two, three=None, four=None, five=None):
346 def debugwireargs(self, one, two, three=None, four=None, five=None):
348 """Used to test argument passing over the wire"""
347 """Used to test argument passing over the wire"""
349 return b"%s %s %s %s %s" % (
348 return b"%s %s %s %s %s" % (
350 one,
349 one,
351 two,
350 two,
352 pycompat.bytestr(three),
351 pycompat.bytestr(three),
353 pycompat.bytestr(four),
352 pycompat.bytestr(four),
354 pycompat.bytestr(five),
353 pycompat.bytestr(five),
355 )
354 )
356
355
357 def getbundle(
356 def getbundle(
358 self,
357 self,
359 source,
358 source,
360 heads=None,
359 heads=None,
361 common=None,
360 common=None,
362 bundlecaps=None,
361 bundlecaps=None,
363 remote_sidedata=None,
362 remote_sidedata=None,
364 **kwargs
363 **kwargs
365 ):
364 ):
366 chunks = exchange.getbundlechunks(
365 chunks = exchange.getbundlechunks(
367 self._repo,
366 self._repo,
368 source,
367 source,
369 heads=heads,
368 heads=heads,
370 common=common,
369 common=common,
371 bundlecaps=bundlecaps,
370 bundlecaps=bundlecaps,
372 remote_sidedata=remote_sidedata,
371 remote_sidedata=remote_sidedata,
373 **kwargs
372 **kwargs
374 )[1]
373 )[1]
375 cb = util.chunkbuffer(chunks)
374 cb = util.chunkbuffer(chunks)
376
375
377 if exchange.bundle2requested(bundlecaps):
376 if exchange.bundle2requested(bundlecaps):
378 # When requesting a bundle2, getbundle returns a stream to make the
377 # When requesting a bundle2, getbundle returns a stream to make the
379 # wire level function happier. We need to build a proper object
378 # wire level function happier. We need to build a proper object
380 # from it in local peer.
379 # from it in local peer.
381 return bundle2.getunbundler(self.ui, cb)
380 return bundle2.getunbundler(self.ui, cb)
382 else:
381 else:
383 return changegroup.getunbundler(b'01', cb, None)
382 return changegroup.getunbundler(b'01', cb, None)
384
383
385 def heads(self):
384 def heads(self):
386 return self._repo.heads()
385 return self._repo.heads()
387
386
388 def known(self, nodes):
387 def known(self, nodes):
389 return self._repo.known(nodes)
388 return self._repo.known(nodes)
390
389
391 def listkeys(self, namespace):
390 def listkeys(self, namespace):
392 return self._repo.listkeys(namespace)
391 return self._repo.listkeys(namespace)
393
392
394 def lookup(self, key):
393 def lookup(self, key):
395 return self._repo.lookup(key)
394 return self._repo.lookup(key)
396
395
397 def pushkey(self, namespace, key, old, new):
396 def pushkey(self, namespace, key, old, new):
398 return self._repo.pushkey(namespace, key, old, new)
397 return self._repo.pushkey(namespace, key, old, new)
399
398
400 def stream_out(self):
399 def stream_out(self):
401 raise error.Abort(_(b'cannot perform stream clone against local peer'))
400 raise error.Abort(_(b'cannot perform stream clone against local peer'))
402
401
403 def unbundle(self, bundle, heads, url):
402 def unbundle(self, bundle, heads, url):
404 """apply a bundle on a repo
403 """apply a bundle on a repo
405
404
406 This function handles the repo locking itself."""
405 This function handles the repo locking itself."""
407 try:
406 try:
408 try:
407 try:
409 bundle = exchange.readbundle(self.ui, bundle, None)
408 bundle = exchange.readbundle(self.ui, bundle, None)
410 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
409 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
411 if util.safehasattr(ret, b'getchunks'):
410 if util.safehasattr(ret, b'getchunks'):
412 # This is a bundle20 object, turn it into an unbundler.
411 # This is a bundle20 object, turn it into an unbundler.
413 # This little dance should be dropped eventually when the
412 # This little dance should be dropped eventually when the
414 # API is finally improved.
413 # API is finally improved.
415 stream = util.chunkbuffer(ret.getchunks())
414 stream = util.chunkbuffer(ret.getchunks())
416 ret = bundle2.getunbundler(self.ui, stream)
415 ret = bundle2.getunbundler(self.ui, stream)
417 return ret
416 return ret
418 except Exception as exc:
417 except Exception as exc:
419 # If the exception contains output salvaged from a bundle2
418 # If the exception contains output salvaged from a bundle2
420 # reply, we need to make sure it is printed before continuing
419 # reply, we need to make sure it is printed before continuing
421 # to fail. So we build a bundle2 with such output and consume
420 # to fail. So we build a bundle2 with such output and consume
422 # it directly.
421 # it directly.
423 #
422 #
424 # This is not very elegant but allows a "simple" solution for
423 # This is not very elegant but allows a "simple" solution for
425 # issue4594
424 # issue4594
426 output = getattr(exc, '_bundle2salvagedoutput', ())
425 output = getattr(exc, '_bundle2salvagedoutput', ())
427 if output:
426 if output:
428 bundler = bundle2.bundle20(self._repo.ui)
427 bundler = bundle2.bundle20(self._repo.ui)
429 for out in output:
428 for out in output:
430 bundler.addpart(out)
429 bundler.addpart(out)
431 stream = util.chunkbuffer(bundler.getchunks())
430 stream = util.chunkbuffer(bundler.getchunks())
432 b = bundle2.getunbundler(self.ui, stream)
431 b = bundle2.getunbundler(self.ui, stream)
433 bundle2.processbundle(self._repo, b)
432 bundle2.processbundle(self._repo, b)
434 raise
433 raise
435 except error.PushRaced as exc:
434 except error.PushRaced as exc:
436 raise error.ResponseError(
435 raise error.ResponseError(
437 _(b'push failed:'), stringutil.forcebytestr(exc)
436 _(b'push failed:'), stringutil.forcebytestr(exc)
438 )
437 )
439
438
440 # End of _basewirecommands interface.
439 # End of _basewirecommands interface.
441
440
442 # Begin of peer interface.
441 # Begin of peer interface.
443
442
444 def commandexecutor(self):
443 def commandexecutor(self):
445 return localcommandexecutor(self)
444 return localcommandexecutor(self)
446
445
447 # End of peer interface.
446 # End of peer interface.
448
447
449
448
450 @interfaceutil.implementer(repository.ipeerlegacycommands)
449 @interfaceutil.implementer(repository.ipeerlegacycommands)
451 class locallegacypeer(localpeer):
450 class locallegacypeer(localpeer):
452 """peer extension which implements legacy methods too; used for tests with
451 """peer extension which implements legacy methods too; used for tests with
453 restricted capabilities"""
452 restricted capabilities"""
454
453
455 def __init__(self, repo):
454 def __init__(self, repo):
456 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
455 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
457
456
458 # Begin of baselegacywirecommands interface.
457 # Begin of baselegacywirecommands interface.
459
458
460 def between(self, pairs):
459 def between(self, pairs):
461 return self._repo.between(pairs)
460 return self._repo.between(pairs)
462
461
463 def branches(self, nodes):
462 def branches(self, nodes):
464 return self._repo.branches(nodes)
463 return self._repo.branches(nodes)
465
464
466 def changegroup(self, nodes, source):
465 def changegroup(self, nodes, source):
467 outgoing = discovery.outgoing(
466 outgoing = discovery.outgoing(
468 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
467 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
469 )
468 )
470 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
469 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
471
470
472 def changegroupsubset(self, bases, heads, source):
471 def changegroupsubset(self, bases, heads, source):
473 outgoing = discovery.outgoing(
472 outgoing = discovery.outgoing(
474 self._repo, missingroots=bases, ancestorsof=heads
473 self._repo, missingroots=bases, ancestorsof=heads
475 )
474 )
476 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
475 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
477
476
478 # End of baselegacywirecommands interface.
477 # End of baselegacywirecommands interface.
479
478
480
479
481 # Functions receiving (ui, features) that extensions can register to impact
480 # Functions receiving (ui, features) that extensions can register to impact
482 # the ability to load repositories with custom requirements. Only
481 # the ability to load repositories with custom requirements. Only
483 # functions defined in loaded extensions are called.
482 # functions defined in loaded extensions are called.
484 #
483 #
485 # The function receives a set of requirement strings that the repository
484 # The function receives a set of requirement strings that the repository
486 # is capable of opening. Functions will typically add elements to the
485 # is capable of opening. Functions will typically add elements to the
487 # set to reflect that the extension knows how to handle those requirements.
486 # set to reflect that the extension knows how to handle those requirements.
488 featuresetupfuncs = set()
487 featuresetupfuncs = set()
489
488
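A sketch of how an extension registers such a function (``exp-myfeature`` is a made-up requirement for illustration; the registration pattern itself is the documented one):

from mercurial import localrepo

def featuresetup(ui, supported):
    # advertise that this extension can open repos carrying this
    # (hypothetical) requirement
    supported.add(b'exp-myfeature')

def uisetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)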
490
489
491 def _getsharedvfs(hgvfs, requirements):
490 def _getsharedvfs(hgvfs, requirements):
492 """returns the vfs object pointing to root of shared source
491 """returns the vfs object pointing to root of shared source
493 repo for a shared repository
492 repo for a shared repository
494
493
495 hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
494 hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
496 requirements is the set of requirements of the current repo (the shared one)
495 requirements is the set of requirements of the current repo (the shared one)
497 """
496 """
498 # The ``shared`` or ``relshared`` requirements indicate the
497 # The ``shared`` or ``relshared`` requirements indicate the
499 # store lives in the path contained in the ``.hg/sharedpath`` file.
498 # store lives in the path contained in the ``.hg/sharedpath`` file.
500 # This is an absolute path for ``shared`` and relative to
499 # This is an absolute path for ``shared`` and relative to
501 # ``.hg/`` for ``relshared``.
500 # ``.hg/`` for ``relshared``.
502 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
501 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
503 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
502 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
504 sharedpath = util.normpath(hgvfs.join(sharedpath))
503 sharedpath = util.normpath(hgvfs.join(sharedpath))
505
504
506 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
505 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
507
506
508 if not sharedvfs.exists():
507 if not sharedvfs.exists():
509 raise error.RepoError(
508 raise error.RepoError(
510 _(b'.hg/sharedpath points to nonexistent directory %s')
509 _(b'.hg/sharedpath points to nonexistent directory %s')
511 % sharedvfs.base
510 % sharedvfs.base
512 )
511 )
513 return sharedvfs
512 return sharedvfs
514
513
515
514
516 def _readrequires(vfs, allowmissing):
515 def _readrequires(vfs, allowmissing):
517 """reads the require file present at root of this vfs
516 """reads the require file present at root of this vfs
518 and return a set of requirements
517 and return a set of requirements
519
518
520 If allowmissing is True, we suppress FileNotFoundError if raised"""
519 If allowmissing is True, we suppress FileNotFoundError if raised"""
521 # requires file contains a newline-delimited list of
520 # requires file contains a newline-delimited list of
522 # features/capabilities the opener (us) must have in order to use
521 # features/capabilities the opener (us) must have in order to use
523 # the repository. This file was introduced in Mercurial 0.9.2,
522 # the repository. This file was introduced in Mercurial 0.9.2,
524 # which means very old repositories may not have one. We assume
523 # which means very old repositories may not have one. We assume
525 # a missing file translates to no requirements.
524 # a missing file translates to no requirements.
526 read = vfs.tryread if allowmissing else vfs.read
525 read = vfs.tryread if allowmissing else vfs.read
527 return set(read(b'requires').splitlines())
526 return set(read(b'requires').splitlines())
528
527
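For illustration (``hgvfs`` assumed in scope), a typical modern repository lists one requirement per line in ``.hg/requires``, so the call returns a set of bytestrings:

requirements = _readrequires(hgvfs, True)
# e.g. {b'dotencode', b'fncache', b'generaldelta', b'revlogv1',
#       b'sparserevlog', b'store'}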
529
528
530 def makelocalrepository(baseui, path: bytes, intents=None):
529 def makelocalrepository(baseui, path: bytes, intents=None):
531 """Create a local repository object.
530 """Create a local repository object.
532
531
533 Given arguments needed to construct a local repository, this function
532 Given arguments needed to construct a local repository, this function
534 performs various early repository loading steps (such as
533 performs various early repository loading steps (such as
535 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
534 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
536 the repository can be opened, derives a type suitable for representing
535 the repository can be opened, derives a type suitable for representing
537 that repository, and returns an instance of it.
536 that repository, and returns an instance of it.
538
537
539 The returned object conforms to the ``repository.completelocalrepository``
538 The returned object conforms to the ``repository.completelocalrepository``
540 interface.
539 interface.
541
540
542 The repository type is derived by calling a series of factory functions
541 The repository type is derived by calling a series of factory functions
543 for each aspect/interface of the final repository. These are defined by
542 for each aspect/interface of the final repository. These are defined by
544 ``REPO_INTERFACES``.
543 ``REPO_INTERFACES``.
545
544
546 Each factory function is called to produce a type implementing a specific
545 Each factory function is called to produce a type implementing a specific
547 interface. The cumulative list of returned types will be combined into a
546 interface. The cumulative list of returned types will be combined into a
548 new type and that type will be instantiated to represent the local
547 new type and that type will be instantiated to represent the local
549 repository.
548 repository.
550
549
551 The factory functions each receive various state that may be consulted
550 The factory functions each receive various state that may be consulted
552 as part of deriving a type.
551 as part of deriving a type.
553
552
554 Extensions should wrap these factory functions to customize repository type
553 Extensions should wrap these factory functions to customize repository type
555 creation. Note that an extension's wrapped function may be called even if
554 creation. Note that an extension's wrapped function may be called even if
556 that extension is not loaded for the repo being constructed. Extensions
555 that extension is not loaded for the repo being constructed. Extensions
557 should check if their ``__name__`` appears in the
556 should check if their ``__name__`` appears in the
558 ``extensionmodulenames`` set passed to the factory function and no-op if
557 ``extensionmodulenames`` set passed to the factory function and no-op if
559 not.
558 not.
560 """
559 """
561 ui = baseui.copy()
560 ui = baseui.copy()
562 # Prevent copying repo configuration.
561 # Prevent copying repo configuration.
563 ui.copy = baseui.copy
562 ui.copy = baseui.copy
564
563
565 # Working directory VFS rooted at repository root.
564 # Working directory VFS rooted at repository root.
566 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
565 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
567
566
568 # Main VFS for .hg/ directory.
567 # Main VFS for .hg/ directory.
569 hgpath = wdirvfs.join(b'.hg')
568 hgpath = wdirvfs.join(b'.hg')
570 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
569 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
571 # Whether this repository is a shared one or not
570 # Whether this repository is a shared one or not
572 shared = False
571 shared = False
573 # If this repository is shared, vfs pointing to shared repo
572 # If this repository is shared, vfs pointing to shared repo
574 sharedvfs = None
573 sharedvfs = None
575
574
576 # The .hg/ path should exist and should be a directory. All other
575 # The .hg/ path should exist and should be a directory. All other
577 # cases are errors.
576 # cases are errors.
578 if not hgvfs.isdir():
577 if not hgvfs.isdir():
579 try:
578 try:
580 hgvfs.stat()
579 hgvfs.stat()
581 except FileNotFoundError:
580 except FileNotFoundError:
582 pass
581 pass
583 except ValueError as e:
582 except ValueError as e:
584 # Can be raised on Python 3.8 when path is invalid.
583 # Can be raised on Python 3.8 when path is invalid.
585 raise error.Abort(
584 raise error.Abort(
586 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
585 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
587 )
586 )
588
587
589 raise error.RepoError(_(b'repository %s not found') % path)
588 raise error.RepoError(_(b'repository %s not found') % path)
590
589
591 requirements = _readrequires(hgvfs, True)
590 requirements = _readrequires(hgvfs, True)
592 shared = (
591 shared = (
593 requirementsmod.SHARED_REQUIREMENT in requirements
592 requirementsmod.SHARED_REQUIREMENT in requirements
594 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
593 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
595 )
594 )
596 storevfs = None
595 storevfs = None
597 if shared:
596 if shared:
598 # This is a shared repo
597 # This is a shared repo
599 sharedvfs = _getsharedvfs(hgvfs, requirements)
598 sharedvfs = _getsharedvfs(hgvfs, requirements)
600 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
599 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
601 else:
600 else:
602 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
601 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
603
602
604 # if .hg/requires contains the sharesafe requirement, it means
603 # if .hg/requires contains the sharesafe requirement, it means
605 # there exists a `.hg/store/requires` too and we should read it
604 # there exists a `.hg/store/requires` too and we should read it
606 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
605 # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store requirement
607 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
606 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
608 # is not present; refer to checkrequirementscompat() for that
607 # is not present; refer to checkrequirementscompat() for that
609 #
608 #
610 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
609 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
611 # repository was shared the old way. We check the share source .hg/requires
610 # repository was shared the old way. We check the share source .hg/requires
612 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
611 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
613 # to be reshared
612 # to be reshared
614 hint = _(b"see `hg help config.format.use-share-safe` for more information")
613 hint = _(b"see `hg help config.format.use-share-safe` for more information")
615 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
614 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
616
615
617 if (
616 if (
618 shared
617 shared
619 and requirementsmod.SHARESAFE_REQUIREMENT
618 and requirementsmod.SHARESAFE_REQUIREMENT
620 not in _readrequires(sharedvfs, True)
619 not in _readrequires(sharedvfs, True)
621 ):
620 ):
622 mismatch_warn = ui.configbool(
621 mismatch_warn = ui.configbool(
623 b'share', b'safe-mismatch.source-not-safe.warn'
622 b'share', b'safe-mismatch.source-not-safe.warn'
624 )
623 )
625 mismatch_config = ui.config(
624 mismatch_config = ui.config(
626 b'share', b'safe-mismatch.source-not-safe'
625 b'share', b'safe-mismatch.source-not-safe'
627 )
626 )
628 mismatch_verbose_upgrade = ui.configbool(
627 mismatch_verbose_upgrade = ui.configbool(
629 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
628 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
630 )
629 )
631 if mismatch_config in (
630 if mismatch_config in (
632 b'downgrade-allow',
631 b'downgrade-allow',
633 b'allow',
632 b'allow',
634 b'downgrade-abort',
633 b'downgrade-abort',
635 ):
634 ):
636 # prevent cyclic import localrepo -> upgrade -> localrepo
635 # prevent cyclic import localrepo -> upgrade -> localrepo
637 from . import upgrade
636 from . import upgrade
638
637
639 upgrade.downgrade_share_to_non_safe(
638 upgrade.downgrade_share_to_non_safe(
640 ui,
639 ui,
641 hgvfs,
640 hgvfs,
642 sharedvfs,
641 sharedvfs,
643 requirements,
642 requirements,
644 mismatch_config,
643 mismatch_config,
645 mismatch_warn,
644 mismatch_warn,
646 mismatch_verbose_upgrade,
645 mismatch_verbose_upgrade,
647 )
646 )
648 elif mismatch_config == b'abort':
647 elif mismatch_config == b'abort':
649 raise error.Abort(
648 raise error.Abort(
650 _(b"share source does not support share-safe requirement"),
649 _(b"share source does not support share-safe requirement"),
651 hint=hint,
650 hint=hint,
652 )
651 )
653 else:
652 else:
654 raise error.Abort(
653 raise error.Abort(
655 _(
654 _(
656 b"share-safe mismatch with source.\nUnrecognized"
655 b"share-safe mismatch with source.\nUnrecognized"
657 b" value '%s' of `share.safe-mismatch.source-not-safe`"
656 b" value '%s' of `share.safe-mismatch.source-not-safe`"
658 b" set."
657 b" set."
659 )
658 )
660 % mismatch_config,
659 % mismatch_config,
661 hint=hint,
660 hint=hint,
662 )
661 )
663 else:
662 else:
664 requirements |= _readrequires(storevfs, False)
663 requirements |= _readrequires(storevfs, False)
665 elif shared:
664 elif shared:
666 sourcerequires = _readrequires(sharedvfs, False)
665 sourcerequires = _readrequires(sharedvfs, False)
667 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
666 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
668 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
667 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
669 mismatch_warn = ui.configbool(
668 mismatch_warn = ui.configbool(
670 b'share', b'safe-mismatch.source-safe.warn'
669 b'share', b'safe-mismatch.source-safe.warn'
671 )
670 )
672 mismatch_verbose_upgrade = ui.configbool(
671 mismatch_verbose_upgrade = ui.configbool(
673 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
672 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
674 )
673 )
675 if mismatch_config in (
674 if mismatch_config in (
676 b'upgrade-allow',
675 b'upgrade-allow',
677 b'allow',
676 b'allow',
678 b'upgrade-abort',
677 b'upgrade-abort',
679 ):
678 ):
680 # prevent cyclic import localrepo -> upgrade -> localrepo
679 # prevent cyclic import localrepo -> upgrade -> localrepo
681 from . import upgrade
680 from . import upgrade
682
681
683 upgrade.upgrade_share_to_safe(
682 upgrade.upgrade_share_to_safe(
684 ui,
683 ui,
685 hgvfs,
684 hgvfs,
686 storevfs,
685 storevfs,
687 requirements,
686 requirements,
688 mismatch_config,
687 mismatch_config,
689 mismatch_warn,
688 mismatch_warn,
690 mismatch_verbose_upgrade,
689 mismatch_verbose_upgrade,
691 )
690 )
692 elif mismatch_config == b'abort':
691 elif mismatch_config == b'abort':
693 raise error.Abort(
692 raise error.Abort(
694 _(
693 _(
695 b'version mismatch: source uses share-safe'
694 b'version mismatch: source uses share-safe'
696 b' functionality while the current share does not'
695 b' functionality while the current share does not'
697 ),
696 ),
698 hint=hint,
697 hint=hint,
699 )
698 )
700 else:
699 else:
701 raise error.Abort(
700 raise error.Abort(
702 _(
701 _(
703 b"share-safe mismatch with source.\nUnrecognized"
702 b"share-safe mismatch with source.\nUnrecognized"
704 b" value '%s' of `share.safe-mismatch.source-safe` set."
703 b" value '%s' of `share.safe-mismatch.source-safe` set."
705 )
704 )
706 % mismatch_config,
705 % mismatch_config,
707 hint=hint,
706 hint=hint,
708 )
707 )
709
708
710 # The .hg/hgrc file may load extensions or contain config options
709 # The .hg/hgrc file may load extensions or contain config options
711 # that influence repository construction. Attempt to load it and
710 # that influence repository construction. Attempt to load it and
712 # process any new extensions that it may have pulled in.
711 # process any new extensions that it may have pulled in.
713 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
712 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
714 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
713 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
715 extensions.loadall(ui)
714 extensions.loadall(ui)
716 extensions.populateui(ui)
715 extensions.populateui(ui)
717
716
718 # Set of module names of extensions loaded for this repository.
717 # Set of module names of extensions loaded for this repository.
719 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
718 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
720
719
721 supportedrequirements = gathersupportedrequirements(ui)
720 supportedrequirements = gathersupportedrequirements(ui)
722
721
723 # We first validate the requirements are known.
722 # We first validate the requirements are known.
724 ensurerequirementsrecognized(requirements, supportedrequirements)
723 ensurerequirementsrecognized(requirements, supportedrequirements)
725
724
726 # Then we validate that the known set is reasonable to use together.
725 # Then we validate that the known set is reasonable to use together.
727 ensurerequirementscompatible(ui, requirements)
726 ensurerequirementscompatible(ui, requirements)
728
727
729 # TODO there are unhandled edge cases related to opening repositories with
728 # TODO there are unhandled edge cases related to opening repositories with
730 # shared storage. If storage is shared, we should also test for requirements
729 # shared storage. If storage is shared, we should also test for requirements
731 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
730 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
732 # that repo, as that repo may load extensions needed to open it. This is a
731 # that repo, as that repo may load extensions needed to open it. This is a
733 # bit complicated because we don't want the other hgrc to overwrite settings
732 # bit complicated because we don't want the other hgrc to overwrite settings
734 # in this hgrc.
733 # in this hgrc.
735 #
734 #
736 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
735 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
737 # file when sharing repos. But if a requirement is added after the share is
736 # file when sharing repos. But if a requirement is added after the share is
738 # performed, thereby introducing a new requirement for the opener, we may
737 # performed, thereby introducing a new requirement for the opener, we may
739 # not see that and could encounter a run-time error interacting with
738 # not see that and could encounter a run-time error interacting with
740 # that shared store since it has an unknown-to-us requirement.
739 # that shared store since it has an unknown-to-us requirement.
741
740
742 # At this point, we know we should be capable of opening the repository.
741 # At this point, we know we should be capable of opening the repository.
743 # Now get on with doing that.
742 # Now get on with doing that.
744
743
745 features = set()
744 features = set()
746
745
747 # The "store" part of the repository holds versioned data. How it is
746 # The "store" part of the repository holds versioned data. How it is
748 # accessed is determined by various requirements. If `shared` or
747 # accessed is determined by various requirements. If `shared` or
749 # `relshared` requirements are present, this indicates the current repository
748 # `relshared` requirements are present, this indicates the current repository
750 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
749 # is a share and the store exists in the path mentioned in `.hg/sharedpath`
751 if shared:
750 if shared:
752 storebasepath = sharedvfs.base
751 storebasepath = sharedvfs.base
753 cachepath = sharedvfs.join(b'cache')
752 cachepath = sharedvfs.join(b'cache')
754 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
753 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
755 else:
754 else:
756 storebasepath = hgvfs.base
755 storebasepath = hgvfs.base
757 cachepath = hgvfs.join(b'cache')
756 cachepath = hgvfs.join(b'cache')
758 wcachepath = hgvfs.join(b'wcache')
757 wcachepath = hgvfs.join(b'wcache')
759
758
760 # The store has changed over time and the exact layout is dictated by
759 # The store has changed over time and the exact layout is dictated by
761 # requirements. The store interface abstracts differences across all
760 # requirements. The store interface abstracts differences across all
762 # of them.
761 # of them.
763 store = makestore(
762 store = makestore(
764 requirements,
763 requirements,
765 storebasepath,
764 storebasepath,
766 lambda base: vfsmod.vfs(base, cacheaudited=True),
765 lambda base: vfsmod.vfs(base, cacheaudited=True),
767 )
766 )
768 hgvfs.createmode = store.createmode
767 hgvfs.createmode = store.createmode
769
768
770 storevfs = store.vfs
769 storevfs = store.vfs
771 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
770 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
772
771
773 if (
772 if (
774 requirementsmod.REVLOGV2_REQUIREMENT in requirements
773 requirementsmod.REVLOGV2_REQUIREMENT in requirements
775 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
774 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
776 ):
775 ):
777 features.add(repository.REPO_FEATURE_SIDE_DATA)
776 features.add(repository.REPO_FEATURE_SIDE_DATA)
778 # the revlogv2 docket introduced a race condition that we need to fix
777 # the revlogv2 docket introduced a race condition that we need to fix
779 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
778 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
780
779
781 # The cache vfs is used to manage cache files.
780 # The cache vfs is used to manage cache files.
782 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
781 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
783 cachevfs.createmode = store.createmode
782 cachevfs.createmode = store.createmode
784 # The cache vfs is used to manage cache files related to the working copy
783 # The cache vfs is used to manage cache files related to the working copy
785 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
784 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
786 wcachevfs.createmode = store.createmode
785 wcachevfs.createmode = store.createmode
787
786
788 # Now resolve the type for the repository object. We do this by repeatedly
787 # Now resolve the type for the repository object. We do this by repeatedly
789 # calling a factory function to produce types for specific aspects of the
788 # calling a factory function to produce types for specific aspects of the
790 # repo's operation. The aggregate returned types are used as base classes
789 # repo's operation. The aggregate returned types are used as base classes
791 # for a dynamically-derived type, which will represent our new repository.
790 # for a dynamically-derived type, which will represent our new repository.
792
791
793 bases = []
792 bases = []
794 extrastate = {}
793 extrastate = {}
795
794
796 for iface, fn in REPO_INTERFACES:
795 for iface, fn in REPO_INTERFACES:
797 # We pass all potentially useful state to give extensions tons of
796 # We pass all potentially useful state to give extensions tons of
798 # flexibility.
797 # flexibility.
799 typ = fn()(
798 typ = fn()(
800 ui=ui,
799 ui=ui,
801 intents=intents,
800 intents=intents,
802 requirements=requirements,
801 requirements=requirements,
803 features=features,
802 features=features,
804 wdirvfs=wdirvfs,
803 wdirvfs=wdirvfs,
805 hgvfs=hgvfs,
804 hgvfs=hgvfs,
806 store=store,
805 store=store,
807 storevfs=storevfs,
806 storevfs=storevfs,
808 storeoptions=storevfs.options,
807 storeoptions=storevfs.options,
809 cachevfs=cachevfs,
808 cachevfs=cachevfs,
810 wcachevfs=wcachevfs,
809 wcachevfs=wcachevfs,
811 extensionmodulenames=extensionmodulenames,
810 extensionmodulenames=extensionmodulenames,
812 extrastate=extrastate,
811 extrastate=extrastate,
813 baseclasses=bases,
812 baseclasses=bases,
814 )
813 )
815
814
816 if not isinstance(typ, type):
815 if not isinstance(typ, type):
817 raise error.ProgrammingError(
816 raise error.ProgrammingError(
818 b'unable to construct type for %s' % iface
817 b'unable to construct type for %s' % iface
819 )
818 )
820
819
821 bases.append(typ)
820 bases.append(typ)
822
821
823 # type() allows you to use characters in type names that wouldn't be
822 # type() allows you to use characters in type names that wouldn't be
824 # recognized as Python symbols in source code. We abuse that to add
823 # recognized as Python symbols in source code. We abuse that to add
825 # rich information about our constructed repo.
824 # rich information about our constructed repo.
826 name = pycompat.sysstr(
825 name = pycompat.sysstr(
827 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
826 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
828 )
827 )
829
828
830 cls = type(name, tuple(bases), {})
829 cls = type(name, tuple(bases), {})
831
830
832 return cls(
831 return cls(
833 baseui=baseui,
832 baseui=baseui,
834 ui=ui,
833 ui=ui,
835 origroot=path,
834 origroot=path,
836 wdirvfs=wdirvfs,
835 wdirvfs=wdirvfs,
837 hgvfs=hgvfs,
836 hgvfs=hgvfs,
838 requirements=requirements,
837 requirements=requirements,
839 supportedrequirements=supportedrequirements,
838 supportedrequirements=supportedrequirements,
840 sharedpath=storebasepath,
839 sharedpath=storebasepath,
841 store=store,
840 store=store,
842 cachevfs=cachevfs,
841 cachevfs=cachevfs,
843 wcachevfs=wcachevfs,
842 wcachevfs=wcachevfs,
844 features=features,
843 features=features,
845 intents=intents,
844 intents=intents,
846 )
845 )
847
846
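A hedged sketch of how a caller obtains a repository object from a path (the path is hypothetical; in practice ``mercurial.hg.repository()`` is the usual entry point rather than calling this function directly):

from mercurial import ui as uimod

repo = makelocalrepository(uimod.ui.load(), b'/path/to/repo')
# the resulting object exposes the resolved requirement set
print(sorted(repo.requirements))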
848
847
849 def loadhgrc(
848 def loadhgrc(
850 ui,
849 ui,
851 wdirvfs: vfsmod.vfs,
850 wdirvfs: vfsmod.vfs,
852 hgvfs: vfsmod.vfs,
851 hgvfs: vfsmod.vfs,
853 requirements,
852 requirements,
854 sharedvfs: Optional[vfsmod.vfs] = None,
853 sharedvfs: Optional[vfsmod.vfs] = None,
855 ):
854 ):
856 """Load hgrc files/content into a ui instance.
855 """Load hgrc files/content into a ui instance.
857
856
858 This is called during repository opening to load any additional
857 This is called during repository opening to load any additional
859 config files or settings relevant to the current repository.
858 config files or settings relevant to the current repository.
860
859
861 Returns a bool indicating whether any additional configs were loaded.
860 Returns a bool indicating whether any additional configs were loaded.
862
861
863 Extensions should monkeypatch this function to modify how per-repo
862 Extensions should monkeypatch this function to modify how per-repo
864 configs are loaded. For example, an extension may wish to pull in
863 configs are loaded. For example, an extension may wish to pull in
865 configs from alternate files or sources.
864 configs from alternate files or sources.
866
865
867 sharedvfs is a vfs object pointing to the source repo if the current one is a
866 sharedvfs is a vfs object pointing to the source repo if the current one is a
868 shared one
867 shared one
869 """
868 """
870 if not rcutil.use_repo_hgrc():
869 if not rcutil.use_repo_hgrc():
871 return False
870 return False
872
871
873 ret = False
872 ret = False
874 # first load config from the shared source if we have to
873 # first load config from the shared source if we have to
875 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
874 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
876 try:
875 try:
877 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
876 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
878 ret = True
877 ret = True
879 except IOError:
878 except IOError:
880 pass
879 pass
881
880
882 try:
881 try:
883 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
882 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
884 ret = True
883 ret = True
885 except IOError:
884 except IOError:
886 pass
885 pass
887
886
888 try:
887 try:
889 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
888 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
890 ret = True
889 ret = True
891 except IOError:
890 except IOError:
892 pass
891 pass
893
892
894 return ret
893 return ret
895
894
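A sketch of the monkeypatching pattern the docstring invites (the extra ``hgrc-myext`` file name is hypothetical; ``extensions.wrapfunction`` is the standard wrapping helper):

from mercurial import extensions, localrepo

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
    ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
    try:
        # pull in an extra, extension-specific config file
        ui.readconfig(hgvfs.join(b'hgrc-myext'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)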
896
895
897 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
896 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
898 """Perform additional actions after .hg/hgrc is loaded.
897 """Perform additional actions after .hg/hgrc is loaded.
899
898
900 This function is called during repository loading immediately after
899 This function is called during repository loading immediately after
901 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
900 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
902
901
903 The function can be used to validate configs, automatically add
902 The function can be used to validate configs, automatically add
904 options (including extensions) based on requirements, etc.
903 options (including extensions) based on requirements, etc.
905 """
904 """
906
905
907 # Map of requirements to list of extensions to load automatically when
906 # Map of requirements to list of extensions to load automatically when
908 # requirement is present.
907 # requirement is present.
909 autoextensions = {
908 autoextensions = {
910 b'git': [b'git'],
909 b'git': [b'git'],
911 b'largefiles': [b'largefiles'],
910 b'largefiles': [b'largefiles'],
912 b'lfs': [b'lfs'],
911 b'lfs': [b'lfs'],
913 }
912 }
914
913
915 for requirement, names in sorted(autoextensions.items()):
914 for requirement, names in sorted(autoextensions.items()):
916 if requirement not in requirements:
915 if requirement not in requirements:
917 continue
916 continue
918
917
919 for name in names:
918 for name in names:
920 if not ui.hasconfig(b'extensions', name):
919 if not ui.hasconfig(b'extensions', name):
921 ui.setconfig(b'extensions', name, b'', source=b'autoload')
920 ui.setconfig(b'extensions', name, b'', source=b'autoload')
922
921
923
922
924 def gathersupportedrequirements(ui):
923 def gathersupportedrequirements(ui):
925 """Determine the complete set of recognized requirements."""
924 """Determine the complete set of recognized requirements."""
926 # Start with all requirements supported by this file.
925 # Start with all requirements supported by this file.
927 supported = set(localrepository._basesupported)
926 supported = set(localrepository._basesupported)
928
927
929 # Execute ``featuresetupfuncs`` entries if they belong to an extension
928 # Execute ``featuresetupfuncs`` entries if they belong to an extension
930 # relevant to this ui instance.
929 # relevant to this ui instance.
931 modules = {m.__name__ for n, m in extensions.extensions(ui)}
930 modules = {m.__name__ for n, m in extensions.extensions(ui)}
932
931
933 for fn in featuresetupfuncs:
932 for fn in featuresetupfuncs:
934 if fn.__module__ in modules:
933 if fn.__module__ in modules:
935 fn(ui, supported)
934 fn(ui, supported)
936
935
937 # Add derived requirements from registered compression engines.
936 # Add derived requirements from registered compression engines.
938 for name in util.compengines:
937 for name in util.compengines:
939 engine = util.compengines[name]
938 engine = util.compengines[name]
940 if engine.available() and engine.revlogheader():
939 if engine.available() and engine.revlogheader():
941 supported.add(b'exp-compression-%s' % name)
940 supported.add(b'exp-compression-%s' % name)
942 if engine.name() == b'zstd':
941 if engine.name() == b'zstd':
943 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
942 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
944
943
945 return supported
944 return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
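# The requirements validated above come from ``.hg/requires``, a plain text
# file with one requirement per line. A typical modern repository might
# contain, for example:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store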


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
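# To summarize the dispatch in ``makestore()`` (store flavor by requirement
# set):
#
#     store + fncache -> fncachestore (long paths are hashed; ``dotencode``
#                        additionally escapes leading '.' and ' ' characters)
#     store only      -> encodedstore
#     neither         -> basicstore (repositories created by very old
#                        Mercurial versions)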


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options
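# As an example of the copies handling above: with the (hypothetical)
# configuration below, a repository without the changeset-sidedata copies
# requirement resolves ``options[b'copies-storage']`` to ``b'extra'``:
#
#     [experimental]
#     copies.write-to = changeset-only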


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
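# As a concrete example of the validation above, the (hypothetical)
# configuration below yields ``options[b'zstd.level'] == 10``, while any
# value outside 0-22 aborts with "invalid value for
# `storage.revlog.zstd.level` config":
#
#     [storage]
#     revlog.zstd.level = 10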


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
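# Conceptually, the derivation works roughly like this (a simplified,
# hypothetical sketch; the real ``makelocalrepository()`` passes much more
# state to each factory):
#
#     bases = []
#     for iface, fn in REPO_INTERFACES:
#         bases.append(fn()(requirements=requirements, features=features))
#     cls = type('derivedrepo', tuple(bases), {})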


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # List of prefixes of files that can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # Holds sets of revisions to be filtered.
        # Should be cleared when something might have changed the filter
        # value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
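    # With ``devel.check-locks`` (or ``devel.all-warnings``) enabled, an
    # unlocked write caught by one of these wards is reported roughly as
    # (illustrative output; the file name is hypothetical):
    #
    #     devel-warn: write with no lock: "phaseroots" ...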

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
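    # For reference, the view names accepted by ``filtered()`` come from
    # ``repoview``'s filter table; e.g. ``repo.filtered(b'visible')`` hides
    # obsolete changesets, while ``repo.filtered(b'served')`` additionally
    # hides secret ones.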

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race, see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()
1758
1757
1759 def _makedirstate(self):
1758 def _makedirstate(self):
1760 """Extension point for wrapping the dirstate per-repo."""
1759 """Extension point for wrapping the dirstate per-repo."""
1761 sparsematchfn = None
1760 sparsematchfn = None
1762 if sparse.use_sparse(self):
1761 if sparse.use_sparse(self):
1763 sparsematchfn = lambda: sparse.matcher(self)
1762 sparsematchfn = lambda: sparse.matcher(self)
1764 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1763 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1765 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1764 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1766 use_dirstate_v2 = v2_req in self.requirements
1765 use_dirstate_v2 = v2_req in self.requirements
1767 use_tracked_hint = th in self.requirements
1766 use_tracked_hint = th in self.requirements
1768
1767
1769 return dirstate.dirstate(
1768 return dirstate.dirstate(
1770 self.vfs,
1769 self.vfs,
1771 self.ui,
1770 self.ui,
1772 self.root,
1771 self.root,
1773 self._dirstatevalidate,
1772 self._dirstatevalidate,
1774 sparsematchfn,
1773 sparsematchfn,
1775 self.nodeconstants,
1774 self.nodeconstants,
1776 use_dirstate_v2,
1775 use_dirstate_v2,
1777 use_tracked_hint=use_tracked_hint,
1776 use_tracked_hint=use_tracked_hint,
1778 )
1777 )
1779
1778
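    # Hedged sketch of how an extension might use this extension point
    # (wrapper name and body are illustrative, not from this module):
    #
    #   def wrapped_makedirstate(orig, repo):
    #       ds = orig(repo)
    #       # ... decorate or replace the dirstate here ...
    #       return ds
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, '_makedirstate', wrapped_makedirstate
    #   )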
    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

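    # Usage sketch (pattern and path are illustrative, not from this module):
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   nm = repo.narrowmatch(m, includeexact=True)
    #   nm(b'src/main.py')  # True only if inside both `m` and the narrowspec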
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

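    # Illustrative contents (hypothetical revision numbers): with the working
    # copy parented at rev 5, lookups can short-circuit as in
    #
    #   repo._quick_access_changeid.get(b'.')      # -> (5, <p1 node>)
    #   repo._quick_access_changeid.get(nullrev)   # -> (-1, nullid)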
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

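    # __getitem__ in practice (illustrative):
    #
    #   repo[None]      # working directory context
    #   repo[b'.']      # first parent of the working directory
    #   repo[0]         # integer revision number
    #   repo[b'tip']    # handled by the special-value fast path above
    #   repo[node]      # 20-byte binary node or 40-char hex string
    #   repo[2:5]       # slice of changectx, filtered revisions skipped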
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

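    # Example (illustrative): the %-formatting escapes values safely, e.g.
    #
    #   repo.revs(b'%d::%d', 10, 42)        # %d escapes an int revision
    #   repo.revs(b'heads(%ln)', nodelist)  # %ln escapes a list of nodes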
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

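    # Illustrative call (hook name is hypothetical): run a hook and abort on
    # a nonzero status; keyword arguments are exposed to shell hooks as HG_*
    # environment variables (e.g. source -> HG_SOURCE):
    #
    #   repo.hook(b'myext-prepush', throw=True, source=b'push')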
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

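    # Illustrative: known() reports node membership in the filtered view
    # (variable names hypothetical):
    #
    #   repo.known([existing_node, unknown_node])  # -> [True, False]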
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

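    # Configuration sketch (hedged example following the hgrc filter syntax):
    # a pattern maps to a filter command, e.g.
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    # Data filters registered via adddatafilter() are matched by command
    # prefix first; anything else falls back to a shell pipe filter.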
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

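    # Illustrative calls: flags select the on-disk representation, e.g.
    #
    #   repo.wwrite(b'bin/run', b'#!/bin/sh\n', b'x')  # executable file
    #   repo.wwrite(b'alias', b'target', b'l')         # symlink to 'target'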
    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
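        # For example, moving tag v1.2 to a new node while adding v1.3 could
        # be recorded as (node values illustrative):
        #
        #   -M 6d3f... v1.2
        #   +M a9c0... v1.2
        #   +A 77e1... v1.3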
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

2615 tr.addfinalize(b'txnclose-hook', txnclosehook)
2614 tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
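    # Illustrative usage sketch (not part of the upstream sources): callers
    # hold the appropriate locks and use the transaction returned above as a
    # context manager, so it closes on clean exit and aborts on error:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'my-change') as tr:
    #             ...  # mutate the store under the open transaction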

    def _journalfiles(self):
        first = (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
        )
        middle = []
        dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
        if dirstate_data is not None:
            middle.append((self.vfs, dirstate_data))
        end = (
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )
        return first + tuple(middle) + end

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
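    # For reference: undoname() maps each journal file to its post-transaction
    # undo counterpart, so the comprehension above yields pairs such as
    # (self.svfs, b'undo') and (self.vfs, b'undo.dirstate'), matching the
    # undo.* files consumed by _rollback() below.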

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
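    # For reference, the undo.desc file parsed at the top of _rollback() is
    # written by _writejournal() above as "%d\n%s\n" (old repository length,
    # then the transaction name), so its contents look like (values
    # illustrative only):
    #
    #     2537
    #     commit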

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater
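    # Sketch of how an extension might augment the updater, as the docstring
    # above invites (illustrative only; `extra_warming` is a hypothetical
    # helper, the wrapping uses the standard extensions.wrapfunction API):
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #
    #         def fullupdater(tr):
    #             updater(tr)
    #             extra_warming(repo, tr)
    #
    #         return fullupdater
    #
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_buildcacheupdater', _wrapped
    #     )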

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is made after a clone and some of the slower caches
        might be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            else:
                caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed write.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
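    # Illustrative call patterns for the API above (not from this file): a
    # caller that wants everything warmed, the way a debug command would,
    # passes the broadest cache set, while post-transaction warming passes the
    # transaction so only relevant caches are refreshed:
    #
    #     repo.updatecaches(caches=repository.CACHES_ALL)
    #     repo.updatecaches(tr)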

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l
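    # The timeouts consulted above come from the user's configuration; an
    # hgrc along these lines makes lock acquisition give up after ten minutes
    # and start warning after ten seconds (values illustrative only):
    #
    #     [ui]
    #     timeout = 600
    #     timeout.warn = 10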

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l
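    # Per the docstrings above, the only safe ordering when both locks are
    # needed is wlock before lock (illustrative sketch):
    #
    #     with repo.wlock():       # working-copy lock first
    #         with repo.lock():    # then the store lock
    #             ...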

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
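    # Minimal usage sketch for commit() (illustrative values only): it returns
    # the new changeset node, or None when there was nothing to commit:
    #
    #     node = repo.commit(
    #         text=b'fix the frobnicator',
    #         user=b'Jane Doe <jane@example.com>',
    #     )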

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]
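    # Sketch of a post-dirstate-status callback matching the contract
    # documented above (the callback name is hypothetical; it receives the
    # working context and the status about to be returned):
    #
    #     def fixup(wctx, status):
    #         repo = wctx.repo()
    #         ...  # consult repo.dirstate here, never a cached copy
    #
    #     repo.addpostdsstatus(fixup)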

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
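    # For reference: for each (top, bottom) pair, the loop above walks the
    # first-parent chain from top towards bottom and records the nodes seen
    # at steps 1, 2, 4, 8, ... (whenever i == f, with f doubling each time).
    # This exponentially spaced sample is what the legacy wire-protocol
    # "between" command relies on for its binary search during discovery.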

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()
3438
3437
3439 def pushkey(self, namespace, key, old, new):
3438 def pushkey(self, namespace, key, old, new):
3440 try:
3439 try:
3441 tr = self.currenttransaction()
3440 tr = self.currenttransaction()
3442 hookargs = {}
3441 hookargs = {}
3443 if tr is not None:
3442 if tr is not None:
3444 hookargs.update(tr.hookargs)
3443 hookargs.update(tr.hookargs)
3445 hookargs = pycompat.strkwargs(hookargs)
3444 hookargs = pycompat.strkwargs(hookargs)
3446 hookargs['namespace'] = namespace
3445 hookargs['namespace'] = namespace
3447 hookargs['key'] = key
3446 hookargs['key'] = key
3448 hookargs['old'] = old
3447 hookargs['old'] = old
3449 hookargs['new'] = new
3448 hookargs['new'] = new
3450 self.hook(b'prepushkey', throw=True, **hookargs)
3449 self.hook(b'prepushkey', throw=True, **hookargs)
3451 except error.HookAbort as exc:
3450 except error.HookAbort as exc:
3452 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3451 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3453 if exc.hint:
3452 if exc.hint:
3454 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3453 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3455 return False
3454 return False
3456 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3455 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3457 ret = pushkey.push(self, namespace, key, old, new)
3456 ret = pushkey.push(self, namespace, key, old, new)
3458
3457
3459 def runhook(unused_success):
3458 def runhook(unused_success):
3460 self.hook(
3459 self.hook(
3461 b'pushkey',
3460 b'pushkey',
3462 namespace=namespace,
3461 namespace=namespace,
3463 key=key,
3462 key=key,
3464 old=old,
3463 old=old,
3465 new=new,
3464 new=new,
3466 ret=ret,
3465 ret=ret,
3467 )
3466 )
3468
3467
3469 self._afterlock(runhook)
3468 self._afterlock(runhook)
3470 return ret
3469 return ret
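
    # Hypothetical usage sketch (names and values are illustrative only):
    # pushing a bookmark through the pushkey protocol looks roughly like
    #
    #     repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
    #
    # where ``old`` is the previous value (b'' when the key is new) and the
    # return value indicates whether the update was accepted.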

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)
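
    # A hypothetical registration call (every name below is illustrative):
    #
    #     repo.register_sidedata_computer(
    #         revlogconst.KIND_CHANGELOG,  # must be one of ALL_KINDS
    #         b'my-category',
    #         keys={b'my-key'},
    #         computer=my_computer,  # callable computing the sidedata
    #         flags=0,
    #     )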


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    return a


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
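
# For example (POSIX paths, for illustration):
#
#     undoname(b'/repo/.hg/store/journal')
#     # -> b'/repo/.hg/store/undo'
#     undoname(b'/repo/.hg/store/journal.backupfiles')
#     # -> b'/repo/.hg/store/undo.backupfiles'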


def instance(ui, path: bytes, create, intents=None, createopts=None):

    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
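
# Under a stock configuration this typically yields the following
# (illustrative; the default backend comes from the experimental
# ``storage.new-repo-backend`` config knob):
#
#     defaultcreateopts(ui)  # -> {b'backend': b'revlogv1'}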


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration.
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirements
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We only have control over the working copy, so "copy" the
            # non-working-copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements
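
# With out-of-the-box settings the resulting set typically looks like the
# following (illustrative only; the exact contents depend on the Mercurial
# version, the configuration and the available compression engines):
#
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      b'sparserevlog', ...}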


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
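
# For example (hypothetical input), only the unrecognized key survives:
#
#     filterknowncreateopts(ui, {b'backend': b'revlogv1', b'bogus': True})
#     # -> {b'bogus': True}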


def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
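
# After poisoning, only ``close()`` remains callable (illustration):
#
#     poisonrepository(repo)
#     repo.close()    # still allowed, now a no-op
#     repo.changelog  # raises error.ProgrammingError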
@@ -1,675 +1,675 @@
# sshpeer.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import re
import uuid

from .i18n import _
from .pycompat import getattr
from . import (
    error,
    pycompat,
    util,
    wireprototypes,
    wireprotov1peer,
    wireprotov1server,
)
from .utils import (
    procutil,
    stringutil,
    urlutil,
)


def _serverquote(s):
    """quote a string for the remote shell ... which we assume is sh"""
    if not s:
        return s
    if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
        return s
    return b"'%s'" % s.replace(b"'", b"'\\''")
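
# For illustration:
#
#     _serverquote(b'path/to/repo')  # -> b'path/to/repo' (already safe)
#     _serverquote(b'my repo')       # -> b"'my repo'"
#     _serverquote(b"it's")          # -> b"'it'\\''s'" (sh-style escaping)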


def _forwardoutput(ui, pipe, warn=False):
    """display all data currently available on pipe as remote output.

    This is non-blocking."""
    if pipe and not pipe.closed:
        s = procutil.readpipe(pipe)
        if s:
            display = ui.warn if warn else ui.status
            for l in s.splitlines():
                display(_(b"remote: "), l, b'\n')


class doublepipe:
    """Operate a side-channel pipe in addition to a main one

    The side-channel pipe contains server output to be forwarded to the user
    input. The double pipe will behave as the "main" pipe, but will ensure the
    content of the "side" pipe is properly processed while we wait for a
    blocking call on the "main" pipe.

    If large amounts of data are read from "main", forwarding will cease after
    the first bytes start to appear. This simplifies the implementation
    without affecting actual output of sshpeer too much, as we rarely issue
    large reads for data not yet emitted by the server.

    The main pipe is expected to be a 'bufferedinputpipe' from the util module
    that handles all the os specific bits. This class lives in this module
    because it focuses on behavior specific to the ssh protocol."""

    def __init__(self, ui, main, side):
        self._ui = ui
        self._main = main
        self._side = side

    def _wait(self):
        """wait until some data are available on main or side

        return a pair of boolean (ismainready, issideready)

        (This will only wait for data if the setup is supported by `util.poll`)
        """
        if (
            isinstance(self._main, util.bufferedinputpipe)
            and self._main.hasbuffer
        ):
            # Main has data. Assume side is worth poking at.
            return True, True

        fds = [self._main.fileno(), self._side.fileno()]
        try:
            act = util.poll(fds)
        except NotImplementedError:
            # not supported yet; assume all fds have data.
            act = fds
        return (self._main.fileno() in act, self._side.fileno() in act)

    def write(self, data):
        return self._call(b'write', data)

    def read(self, size):
        r = self._call(b'read', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def unbufferedread(self, size):
        r = self._call(b'unbufferedread', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def readline(self):
        return self._call(b'readline')

    def _call(self, methname, data=None):
        """call <methname> on "main", forward output of "side" while blocking"""
        # data can be '' or 0
        if (data is not None and not data) or self._main.closed:
            _forwardoutput(self._ui, self._side)
            return b''
        while True:
            mainready, sideready = self._wait()
            if sideready:
                _forwardoutput(self._ui, self._side)
            if mainready:
                meth = getattr(self._main, methname)
                if data is None:
                    return meth()
                else:
                    return meth(data)

    def close(self):
        return self._main.close()

    @property
    def closed(self):
        return self._main.closed

    def flush(self):
        return self._main.flush()


def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
    """Clean up pipes used by an SSH connection."""
    didsomething = False
    if pipeo and not pipeo.closed:
        didsomething = True
        pipeo.close()
    if pipei and not pipei.closed:
        didsomething = True
        pipei.close()

    if pipee and not pipee.closed:
        didsomething = True
        # Try to read from the err descriptor until EOF.
        try:
            for l in pipee:
                ui.status(_(b'remote: '), l)
        except (IOError, ValueError):
            pass

        pipee.close()

    if didsomething and warn is not None:
        # Encourage explicit close of sshpeers. Closing via __del__ is
        # not very predictable when exceptions are thrown, which has led
        # to deadlocks due to a peer being gc'ed in a fork.
        # We add our own stack trace, because the stacktrace when called
        # from __del__ is useless.
        ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)


def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
    """Create an SSH connection to a server.

    Returns a tuple of (process, stdin, stdout, stderr) for the
    spawned process.
    """
    cmd = b'%s %s %s' % (
        sshcmd,
        args,
        procutil.shellquote(
            b'%s -R %s serve --stdio'
            % (_serverquote(remotecmd), _serverquote(path))
        ),
    )

    ui.debug(b'running %s\n' % cmd)

    # no buffering allows the use of 'select'
    # feel free to remove buffering and select usage when we ultimately
    # move to threading.
    stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)

    return proc, stdin, stdout, stderr
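
# With sshcmd=b'ssh', args=b'user@host', remotecmd=b'hg' and path=b'repo'
# (all illustrative), the constructed command is roughly:
#
#     ssh user@host 'hg -R repo serve --stdio'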


def _clientcapabilities():
    """Return list of capabilities of this client.

    Returns a list of capabilities that are supported by this client.
    """
    protoparams = {b'partial-pull'}
    comps = [
        e.wireprotosupport().name
        for e in util.compengines.supportedwireengines(util.CLIENTROLE)
    ]
    protoparams.add(b'comp=%s' % b','.join(comps))
    return protoparams
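
# A typical result (illustrative; the engine list depends on what is
# compiled in and importable at runtime):
#
#     {b'partial-pull', b'comp=zstd,zlib,none,bzip2'}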
217
217
218
218
219 def _performhandshake(ui, stdin, stdout, stderr):
219 def _performhandshake(ui, stdin, stdout, stderr):
220 def badresponse():
220 def badresponse():
221 # Flush any output on stderr. In general, the stderr contains errors
221 # Flush any output on stderr. In general, the stderr contains errors
222 # from the remote (ssh errors, some hg errors), and status indications
222 # from the remote (ssh errors, some hg errors), and status indications
223 # (like "adding changes"), with no current way to tell them apart.
223 # (like "adding changes"), with no current way to tell them apart.
224 # Here we failed so early that it's almost certainly only errors, so
224 # Here we failed so early that it's almost certainly only errors, so
225 # use warn=True so -q doesn't hide them.
225 # use warn=True so -q doesn't hide them.
226 _forwardoutput(ui, stderr, warn=True)
226 _forwardoutput(ui, stderr, warn=True)
227
227
228 msg = _(b'no suitable response from remote hg')
228 msg = _(b'no suitable response from remote hg')
229 hint = ui.config(b'ui', b'ssherrorhint')
229 hint = ui.config(b'ui', b'ssherrorhint')
230 raise error.RepoError(msg, hint=hint)
230 raise error.RepoError(msg, hint=hint)
231
231
232 # The handshake consists of sending wire protocol commands in reverse
232 # The handshake consists of sending wire protocol commands in reverse
233 # order of protocol implementation and then sniffing for a response
233 # order of protocol implementation and then sniffing for a response
234 # to one of them.
234 # to one of them.
235 #
235 #
236 # Those commands (from oldest to newest) are:
236 # Those commands (from oldest to newest) are:
237 #
237 #
238 # ``between``
238 # ``between``
239 # Asks for the set of revisions between a pair of revisions. Command
239 # Asks for the set of revisions between a pair of revisions. Command
240 # present in all Mercurial server implementations.
240 # present in all Mercurial server implementations.
241 #
241 #
242 # ``hello``
242 # ``hello``
243 # Instructs the server to advertise its capabilities. Introduced in
243 # Instructs the server to advertise its capabilities. Introduced in
244 # Mercurial 0.9.1.
244 # Mercurial 0.9.1.
245 #
245 #
246 # ``upgrade``
246 # ``upgrade``
247 # Requests upgrade from default transport protocol version 1 to
247 # Requests upgrade from default transport protocol version 1 to
248 # a newer version. Introduced in Mercurial 4.6 as an experimental
248 # a newer version. Introduced in Mercurial 4.6 as an experimental
249 # feature.
249 # feature.
250 #
250 #
251 # The ``between`` command is issued with a request for the null
251 # The ``between`` command is issued with a request for the null
252 # range. If the remote is a Mercurial server, this request will
252 # range. If the remote is a Mercurial server, this request will
253 # generate a specific response: ``1\n\n``. This represents the
253 # generate a specific response: ``1\n\n``. This represents the
254 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
254 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
255 # in the output stream and know this is the response to ``between``
255 # in the output stream and know this is the response to ``between``
256 # and we're at the end of our handshake reply.
256 # and we're at the end of our handshake reply.
257 #
257 #
258 # The response to the ``hello`` command will be a line with the
258 # The response to the ``hello`` command will be a line with the
259 # length of the value returned by that command followed by that
259 # length of the value returned by that command followed by that
260 # value. If the server doesn't support ``hello`` (which should be
260 # value. If the server doesn't support ``hello`` (which should be
261 # rare), that line will be ``0\n``. Otherwise, the value will contain
261 # rare), that line will be ``0\n``. Otherwise, the value will contain
262 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
262 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
263 # the capabilities of the server.
263 # the capabilities of the server.
264 #
264 #
265 # The ``upgrade`` command isn't really a command in the traditional
265 # The ``upgrade`` command isn't really a command in the traditional
266 # sense of version 1 of the transport because it isn't using the
266 # sense of version 1 of the transport because it isn't using the
267 # proper mechanism for formatting insteads: instead, it just encodes
267 # proper mechanism for formatting insteads: instead, it just encodes
268 # arguments on the line, delimited by spaces.
268 # arguments on the line, delimited by spaces.
269 #
269 #
270 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
270 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
271 # If the server doesn't support protocol upgrades, it will reply to
271 # If the server doesn't support protocol upgrades, it will reply to
272 # this line with ``0\n``. Otherwise, it emits an
272 # this line with ``0\n``. Otherwise, it emits an
273 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
273 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
274 # Content immediately following this line describes additional
274 # Content immediately following this line describes additional
275 # protocol and server state.
275 # protocol and server state.
276 #
276 #
277 # In addition to the responses to our command requests, the server
277 # In addition to the responses to our command requests, the server
278 # may emit "banner" output on stdout. SSH servers are allowed to
278 # may emit "banner" output on stdout. SSH servers are allowed to
279 # print messages to stdout on login. Issuing commands on connection
279 # print messages to stdout on login. Issuing commands on connection
280 # allows us to flush this banner output from the server by scanning
280 # allows us to flush this banner output from the server by scanning
281 # for output to our well-known ``between`` command. Of course, if
281 # for output to our well-known ``between`` command. Of course, if
282 # the banner contains ``1\n\n``, this will throw off our detection.
282 # the banner contains ``1\n\n``, this will throw off our detection.
283
283
284 requestlog = ui.configbool(b'devel', b'debug.peer-request')
284 requestlog = ui.configbool(b'devel', b'debug.peer-request')
285
285
286 # Generate a random token to help identify responses to version 2
286 # Generate a random token to help identify responses to version 2
287 # upgrade request.
287 # upgrade request.
288 token = pycompat.sysbytes(str(uuid.uuid4()))
288 token = pycompat.sysbytes(str(uuid.uuid4()))
289
289
290 try:
290 try:
291 pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
291 pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
292 handshake = [
292 handshake = [
293 b'hello\n',
293 b'hello\n',
294 b'between\n',
294 b'between\n',
295 b'pairs %d\n' % len(pairsarg),
295 b'pairs %d\n' % len(pairsarg),
296 pairsarg,
296 pairsarg,
297 ]
297 ]
298
298
299 if requestlog:
299 if requestlog:
300 ui.debug(b'devel-peer-request: hello+between\n')
300 ui.debug(b'devel-peer-request: hello+between\n')
301 ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
301 ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
302 ui.debug(b'sending hello command\n')
302 ui.debug(b'sending hello command\n')
303 ui.debug(b'sending between command\n')
303 ui.debug(b'sending between command\n')
304
304
305 stdin.write(b''.join(handshake))
305 stdin.write(b''.join(handshake))
306 stdin.flush()
306 stdin.flush()
307 except IOError:
307 except IOError:
308 badresponse()
308 badresponse()
309
309
    # Assume version 1 of wire protocol by default.
    protoname = wireprototypes.SSHV1
    reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token))

    lines = [b'', b'dummy']
    max_noise = 500
    while lines[-1] and max_noise:
        try:
            l = stdout.readline()
            _forwardoutput(ui, stderr, warn=True)

            # Look for reply to protocol upgrade request. It has a token
            # in it, so there should be no false positives.
            m = reupgraded.match(l)
            if m:
                protoname = m.group(1)
                ui.debug(b'protocol upgraded to %s\n' % protoname)
                # If an upgrade was handled, the ``hello`` and ``between``
                # requests are ignored. The next output belongs to the
                # protocol, so stop scanning lines.
                break

            # Otherwise it could be a banner, or a ``0\n`` response if the
            # server doesn't support upgrade.

            if lines[-1] == b'1\n' and l == b'\n':
                break
            if l:
                ui.debug(b'remote: ', l)
            lines.append(l)
            max_noise -= 1
        except IOError:
            badresponse()
    else:
        badresponse()

    caps = set()

    # For version 1, we should see a ``capabilities`` line in response to the
    # ``hello`` command.
    if protoname == wireprototypes.SSHV1:
        for l in reversed(lines):
            # Look for response to ``hello`` command. Scan from the back so
            # we don't misinterpret banner output as the command reply.
            if l.startswith(b'capabilities:'):
                caps.update(l[:-1].split(b':')[1].split())
                break

    # Error out if we couldn't find capabilities; this means one of:
    #
    # 1. Remote isn't a Mercurial server
    # 2. Remote is a <0.9.1 Mercurial server
    # 3. Remote is a future Mercurial server that dropped ``hello``
    #    and other attempted handshake mechanisms.
    if not caps:
        badresponse()

    # Flush any output on stderr before proceeding.
    _forwardoutput(ui, stderr, warn=True)

    return protoname, caps

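
# A minimal, self-contained sketch (illustrative only, not part of the
# real module) of the handshake scanning above: it runs the same
# ``1\n\n`` detection and ``capabilities:`` parsing against a canned
# reply from a hypothetical server that prints a login banner. The real
# loop additionally caps the tolerated noise at 500 lines.
def _example_scan_handshake():
    import io

    capline = b'capabilities: lookup branchmap pushkey known getbundle\n'
    reply = io.BytesIO(
        b'welcome to example.com\n'  # SSH login banner noise
        + b'%d\n' % len(capline)  # framed response to ``hello``...
        + capline  # ...and its payload
        + b'1\n'  # framed response to ``between``: one byte...
        + b'\n'  # ...and that byte is a newline
    )

    lines = [b'', b'dummy']
    while lines[-1]:
        l = reply.readline()
        if lines[-1] == b'1\n' and l == b'\n':
            break
        lines.append(l)

    caps = set()
    # Scan from the back so banner output isn't mistaken for the reply.
    for l in reversed(lines):
        if l.startswith(b'capabilities:'):
            caps.update(l[:-1].split(b':')[1].split())
            break
    return caps


# _example_scan_handshake() returns:
# {b'lookup', b'branchmap', b'pushkey', b'known', b'getbundle'}
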
class sshv1peer(wireprotov1peer.wirepeer):
    def __init__(
        self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
    ):
        """Create a peer from an existing SSH connection.

        ``proc`` is a handle on the underlying SSH process.
        ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
        pipes for that process.
        ``caps`` is a set of capabilities supported by the remote.
        ``autoreadstderr`` denotes whether to automatically read from
        stderr and to forward its output.
        """
        super().__init__(ui)
        self._url = url
        # self._subprocess is unused. Keeping a handle on the process
        # holds a reference and prevents it from being garbage collected.
        self._subprocess = proc

        # And we hook up our "doublepipe" wrapper to allow querying
        # stderr any time we perform I/O.
        if autoreadstderr:
            stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
            stdin = doublepipe(ui, stdin, stderr)

        self._pipeo = stdin
        self._pipei = stdout
        self._pipee = stderr
        self._caps = caps
        self._autoreadstderr = autoreadstderr
        self._initstack = b''.join(util.getstackframes(1))

    # Commands that have a "framed" response where the first line of the
    # response contains the length of that response.
    _FRAMED_COMMANDS = {
        b'batch',
    }

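    # A framed response thus arrives as a decimal length line followed by
    # exactly that many payload bytes, e.g. ``12\n<12 bytes>``.
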
    # Begin of ipeerconnection interface.

    def url(self):
        return self._url

    def local(self):
        return None

    def canpush(self):
        return True

    def close(self):
        self._cleanup()

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _readerr(self):
        _forwardoutput(self.ui, self._pipee)

    def _abort(self, exception):
        self._cleanup()
        raise exception

    def _cleanup(self, warn=None):
        _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)

    def __del__(self):
        self._cleanup(warn=self._initstack)

    def _sendrequest(self, cmd, args, framed=False):
        if self.ui.debugflag and self.ui.configbool(
            b'devel', b'debug.peer-request'
        ):
            dbg = self.ui.debug
            line = b'devel-peer-request: %s\n'
            dbg(line % cmd)
            for key, value in sorted(args.items()):
                if not isinstance(value, dict):
                    dbg(line % b'  %s: %d bytes' % (key, len(value)))
                else:
                    for dk, dv in sorted(value.items()):
                        dbg(line % b'  %s-%s: %d' % (key, dk, len(dv)))
        self.ui.debug(b"sending %s command\n" % cmd)
        self._pipeo.write(b"%s\n" % cmd)
        _func, names = wireprotov1server.commands[cmd]
        keys = names.split()
        wireargs = {}
        for k in keys:
            if k == b'*':
                wireargs[b'*'] = args
                break
            else:
                wireargs[k] = args[k]
                del args[k]
        for k, v in sorted(wireargs.items()):
            self._pipeo.write(b"%s %d\n" % (k, len(v)))
            if isinstance(v, dict):
                for dk, dv in v.items():
                    self._pipeo.write(b"%s %d\n" % (dk, len(dv)))
                    self._pipeo.write(dv)
            else:
                self._pipeo.write(v)
        self._pipeo.flush()

        # We know exactly how many bytes are in the response. So return a
        # proxy around the raw output stream that allows reading exactly
        # this many bytes. Callers then can read() without fear of
        # overrunning the response.
        if framed:
            amount = self._getamount()
            return util.cappedreader(self._pipei, amount)

        return self._pipei

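    # As a concrete illustration, ``_sendrequest(b'between', {b'pairs':
    # pairsarg})`` writes the command name, then each argument as a
    # ``<name> <length>\n`` line followed by the raw value:
    #
    #   between\n
    #   pairs 81\n
    #   <81 bytes of pairsarg>
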
    def _callstream(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _callcompressable(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)

    def _call(self, cmd, **args):
        args = pycompat.byteskwargs(args)
        return self._sendrequest(cmd, args, framed=True).read()

    def _callpush(self, cmd, fp, **args):
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            return b'', r

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        # In case of success, there is an empty frame and a frame containing
        # the integer result (as a string).
        # In case of error, there is a non-empty frame containing the error.
        r = self._readframed()
        if r:
            return b'', r
        return self._readframed(), b''

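    # The successful push exchange above therefore looks like this on the
    # wire (``c:``/``s:`` are client and server; lengths are decimal):
    #
    #   s: 0\n                          (empty frame: send the payload)
    #   c: 4096\n<4096 bytes> ... 0\n   (content frames, then empty frame)
    #   s: 0\n                          (empty frame: success)
    #      1\n1                         (frame holding the result, e.g. ``1``)
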
    def _calltwowaystream(self, cmd, fp, **args):
        # The server responds with an empty frame if the client should
        # continue submitting the payload.
        r = self._call(cmd, **args)
        if r:
            # XXX needs to be made better
            raise error.Abort(_(b'unexpected remote reply: %s') % r)

        # The payload consists of frames with content followed by an empty
        # frame.
        for d in iter(lambda: fp.read(4096), b''):
            self._writeframed(d)
        self._writeframed(b"", flush=True)

        return self._pipei

    def _getamount(self):
        l = self._pipei.readline()
        if l == b'\n':
            if self._autoreadstderr:
                self._readerr()
            msg = _(b'check previous remote output')
            self._abort(error.OutOfBandError(hint=msg))
        if self._autoreadstderr:
            self._readerr()
        try:
            return int(l)
        except ValueError:
            self._abort(error.ResponseError(_(b"unexpected response:"), l))

    def _readframed(self):
        size = self._getamount()
        if not size:
            return b''

        return self._pipei.read(size)

    def _writeframed(self, data, flush=False):
        self._pipeo.write(b"%d\n" % len(data))
        if data:
            self._pipeo.write(data)
        if flush:
            self._pipeo.flush()
        if self._autoreadstderr:
            self._readerr()

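
# A standalone sketch (illustrative only, not used by this module) of the
# length-prefixed framing that ``_writeframed`` and ``_readframed``
# implement, round-tripped through an in-memory pipe:
def _example_frame_roundtrip(data):
    import io

    pipe = io.BytesIO()
    # Write side: a decimal length line, then the payload itself.
    pipe.write(b"%d\n" % len(data))
    pipe.write(data)
    pipe.seek(0)
    # Read side: parse the length line, then read exactly that many bytes.
    size = int(pipe.readline())
    return pipe.read(size)


# _example_frame_roundtrip(b'hello') == b'hello'
# _example_frame_roundtrip(b'') == b'' (an empty frame is just ``0\n``)
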
def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
    """Make a peer instance from existing pipes.

    ``path`` and ``proc`` are stored on the eventual peer instance and may
    not be used for anything meaningful.

    ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
    SSH server's stdio handles.

    This function is factored out to allow creating peers that don't
    actually spawn a new process. It is useful for starting SSH protocol
    servers and clients via non-standard means, which can be useful for
    testing.
    """
    try:
        protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
    except Exception:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise

    if protoname == wireprototypes.SSHV1:
        return sshv1peer(
            ui,
            path,
            proc,
            stdin,
            stdout,
            stderr,
            caps,
            autoreadstderr=autoreadstderr,
        )
    else:
        _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
        raise error.RepoError(
            _(b'unknown version of SSH protocol: %s') % protoname
        )

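# Hypothetical test-style usage of ``makepeer``, per the docstring above
# (sketch only: the repo path and ``ui`` object are assumptions, and
# error handling is elided):
#
#   proc = subprocess.Popen(
#       [b'hg', b'-R', repopath, b'serve', b'--stdio'],
#       stdin=subprocess.PIPE,
#       stdout=subprocess.PIPE,
#       stderr=subprocess.PIPE,
#   )
#   peer = makepeer(
#       ui, b'ssh://fake/', proc, proc.stdin, proc.stdout, proc.stderr
#   )
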
def make_peer(ui, path, create, intents=None, createopts=None):
    """Create an SSH peer.

    The returned object conforms to the ``wireprotov1peer.wirepeer``
    interface.
    """
    u = urlutil.url(path, parsequery=False, parsefragment=False)
    if u.scheme != b'ssh' or not u.host or u.path is None:
        raise error.RepoError(_(b"couldn't parse location %s") % path)

    urlutil.checksafessh(path)

    if u.passwd is not None:
        raise error.RepoError(_(b'password in URL not supported'))

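    # For example, ``ssh://user@example.com//absolute/repo`` passes the
    # checks above, while ``ssh://user:secret@example.com/repo`` is
    # rejected (embedded password) and a host or path starting with ``-``
    # is refused by ``checksafessh`` to avoid option injection into ssh.
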
    sshcmd = ui.config(b'ui', b'ssh')
    remotecmd = ui.config(b'ui', b'remotecmd')
    sshaddenv = dict(ui.configitems(b'sshenv'))
    sshenv = procutil.shellenviron(sshaddenv)
    remotepath = u.path or b'.'

    args = procutil.sshargs(sshcmd, u.host, u.user, u.port)

    if create:
        # We /could/ pass createopts through, but only if the remote init
        # command knows how to handle them, and we don't yet make any
        # assumptions about that. Without querying the remote, there's no
        # way of knowing whether it even supports the requested feature.
        if createopts:
            raise error.RepoError(
                _(
                    b'cannot create remote SSH repositories '
                    b'with extra options'
                )
            )

        cmd = b'%s %s %s' % (
            sshcmd,
            args,
            procutil.shellquote(
                b'%s init %s'
                % (_serverquote(remotecmd), _serverquote(remotepath))
            ),
        )
        ui.debug(b'running %s\n' % cmd)
        res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv)
        if res != 0:
            raise error.RepoError(_(b'could not create remote repo'))

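    # For reference, with the default configuration (``ui.ssh=ssh``,
    # ``ui.remotecmd=hg``) the ``init`` command constructed above runs
    # roughly as:
    #
    #   ssh user@example.com "hg init '/path/to/repo'"
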
    proc, stdin, stdout, stderr = _makeconnection(
        ui, sshcmd, args, remotecmd, remotepath, sshenv
    )

    peer = makepeer(ui, path, proc, stdin, stdout, stderr)

    # Finally, if supported by the server, notify it about our own
    # capabilities.
    if b'protocaps' in peer.capabilities():
        try:
            peer._call(
                b"protocaps", caps=b' '.join(sorted(_clientcapabilities()))
            )
        except IOError:
            peer._cleanup()
            raise error.RepoError(_(b'capability exchange failed'))

    return peer
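

# Hypothetical caller-side sketch (the ``ui`` construction and the URL
# are assumptions for illustration):
#
#   from mercurial import ui as uimod
#   peer = make_peer(uimod.ui(), b'ssh://hg@example.com/repo', create=False)
#   caps = peer.capabilities()
#   peer.close()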