peer: make ui an attribute...
Gregory Szorc
r37337:e826fe7a default
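This changeset makes ui a plain attribute on peer objects: httppeer and localpeer previously stored the ui in a private _ui field and exposed it through a util.propertycache property, and now simply assign self.ui = ui in their constructors. Below is a minimal sketch of the before/after pattern; the Peer classes and the cachedproperty helper are simplified stand-ins for illustration, not the real httppeer/localpeer/util.propertycache implementations.

    class cachedproperty(object):
        # Simplified stand-in for util.propertycache: compute the value on
        # first access and store it on the instance, shadowing the
        # descriptor so later reads are plain attribute lookups.
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            value = self.func(obj)
            obj.__dict__[self.name] = value
            return value

    class PeerBefore(object):
        # Old pattern: private attribute plus a cached property.
        def __init__(self, ui):
            self._ui = ui

        @cachedproperty
        def ui(self):
            return self._ui

    class PeerAfter(object):
        # New pattern: ui is just a public attribute set in __init__.
        def __init__(self, ui):
            self.ui = ui

Since the property only returned the stored object, exposing ui as a plain attribute is behaviour-preserving and removes a level of indirection for code that reads peer.ui.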
@@ -1,506 +1,502 b''
1 # httppeer.py - HTTP repository proxy classes for mercurial
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import io
12 import io
13 import os
13 import os
14 import socket
14 import socket
15 import struct
15 import struct
16 import tempfile
16 import tempfile
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 bundle2,
20 bundle2,
21 error,
21 error,
22 httpconnection,
22 httpconnection,
23 pycompat,
23 pycompat,
24 statichttprepo,
24 statichttprepo,
25 url as urlmod,
25 url as urlmod,
26 util,
26 util,
27 wireproto,
27 wireproto,
28 )
28 )
29
29
30 httplib = util.httplib
30 httplib = util.httplib
31 urlerr = util.urlerr
31 urlerr = util.urlerr
32 urlreq = util.urlreq
32 urlreq = util.urlreq
33
33
34 def encodevalueinheaders(value, header, limit):
34 def encodevalueinheaders(value, header, limit):
35 """Encode a string value into multiple HTTP headers.
35 """Encode a string value into multiple HTTP headers.
36
36
37 ``value`` will be encoded into 1 or more HTTP headers with the names
37 ``value`` will be encoded into 1 or more HTTP headers with the names
38 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
38 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
39 name + value will be at most ``limit`` bytes long.
39 name + value will be at most ``limit`` bytes long.
40
40
41 Returns an iterable of 2-tuples consisting of header names and
41 Returns an iterable of 2-tuples consisting of header names and
42 values as native strings.
42 values as native strings.
43 """
43 """
44 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
44 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
45 # not bytes. This function always takes bytes in as arguments.
45 # not bytes. This function always takes bytes in as arguments.
46 fmt = pycompat.strurl(header) + r'-%s'
46 fmt = pycompat.strurl(header) + r'-%s'
47 # Note: it is *NOT* a bug that the last bit here is a bytestring
47 # Note: it is *NOT* a bug that the last bit here is a bytestring
48 # and not a unicode: we're just getting the encoded length anyway,
48 # and not a unicode: we're just getting the encoded length anyway,
49 # and using an r-string to make it portable between Python 2 and 3
49 # and using an r-string to make it portable between Python 2 and 3
50 # doesn't work because then the \r is a literal backslash-r
50 # doesn't work because then the \r is a literal backslash-r
51 # instead of a carriage return.
51 # instead of a carriage return.
52 valuelen = limit - len(fmt % r'000') - len(': \r\n')
52 valuelen = limit - len(fmt % r'000') - len(': \r\n')
53 result = []
53 result = []
54
54
55 n = 0
55 n = 0
56 for i in xrange(0, len(value), valuelen):
56 for i in xrange(0, len(value), valuelen):
57 n += 1
57 n += 1
58 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
58 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
59
59
60 return result
60 return result
61
61
62 def _wraphttpresponse(resp):
62 def _wraphttpresponse(resp):
63 """Wrap an HTTPResponse with common error handlers.
63 """Wrap an HTTPResponse with common error handlers.
64
64
65 This ensures that any I/O from any consumer raises the appropriate
65 This ensures that any I/O from any consumer raises the appropriate
66 error and messaging.
66 error and messaging.
67 """
67 """
68 origread = resp.read
68 origread = resp.read
69
69
70 class readerproxy(resp.__class__):
70 class readerproxy(resp.__class__):
71 def read(self, size=None):
71 def read(self, size=None):
72 try:
72 try:
73 return origread(size)
73 return origread(size)
74 except httplib.IncompleteRead as e:
74 except httplib.IncompleteRead as e:
75 # e.expected is an integer if length known or None otherwise.
75 # e.expected is an integer if length known or None otherwise.
76 if e.expected:
76 if e.expected:
77 msg = _('HTTP request error (incomplete response; '
77 msg = _('HTTP request error (incomplete response; '
78 'expected %d bytes got %d)') % (e.expected,
78 'expected %d bytes got %d)') % (e.expected,
79 len(e.partial))
79 len(e.partial))
80 else:
80 else:
81 msg = _('HTTP request error (incomplete response)')
81 msg = _('HTTP request error (incomplete response)')
82
82
83 raise error.PeerTransportError(
83 raise error.PeerTransportError(
84 msg,
84 msg,
85 hint=_('this may be an intermittent network failure; '
85 hint=_('this may be an intermittent network failure; '
86 'if the error persists, consider contacting the '
86 'if the error persists, consider contacting the '
87 'network or server operator'))
87 'network or server operator'))
88 except httplib.HTTPException as e:
88 except httplib.HTTPException as e:
89 raise error.PeerTransportError(
89 raise error.PeerTransportError(
90 _('HTTP request error (%s)') % e,
90 _('HTTP request error (%s)') % e,
91 hint=_('this may be an intermittent network failure; '
91 hint=_('this may be an intermittent network failure; '
92 'if the error persists, consider contacting the '
92 'if the error persists, consider contacting the '
93 'network or server operator'))
93 'network or server operator'))
94
94
95 resp.__class__ = readerproxy
95 resp.__class__ = readerproxy
96
96
97 class _multifile(object):
97 class _multifile(object):
98 def __init__(self, *fileobjs):
98 def __init__(self, *fileobjs):
99 for f in fileobjs:
99 for f in fileobjs:
100 if not util.safehasattr(f, 'length'):
100 if not util.safehasattr(f, 'length'):
101 raise ValueError(
101 raise ValueError(
102 '_multifile only supports file objects that '
102 '_multifile only supports file objects that '
103 'have a length but this one does not:', type(f), f)
103 'have a length but this one does not:', type(f), f)
104 self._fileobjs = fileobjs
104 self._fileobjs = fileobjs
105 self._index = 0
105 self._index = 0
106
106
107 @property
107 @property
108 def length(self):
108 def length(self):
109 return sum(f.length for f in self._fileobjs)
109 return sum(f.length for f in self._fileobjs)
110
110
111 def read(self, amt=None):
111 def read(self, amt=None):
112 if amt <= 0:
112 if amt <= 0:
113 return ''.join(f.read() for f in self._fileobjs)
113 return ''.join(f.read() for f in self._fileobjs)
114 parts = []
114 parts = []
115 while amt and self._index < len(self._fileobjs):
115 while amt and self._index < len(self._fileobjs):
116 parts.append(self._fileobjs[self._index].read(amt))
116 parts.append(self._fileobjs[self._index].read(amt))
117 got = len(parts[-1])
117 got = len(parts[-1])
118 if got < amt:
118 if got < amt:
119 self._index += 1
119 self._index += 1
120 amt -= got
120 amt -= got
121 return ''.join(parts)
121 return ''.join(parts)
122
122
123 def seek(self, offset, whence=os.SEEK_SET):
123 def seek(self, offset, whence=os.SEEK_SET):
124 if whence != os.SEEK_SET:
124 if whence != os.SEEK_SET:
125 raise NotImplementedError(
125 raise NotImplementedError(
126 '_multifile does not support anything other'
126 '_multifile does not support anything other'
127 ' than os.SEEK_SET for whence on seek()')
127 ' than os.SEEK_SET for whence on seek()')
128 if offset != 0:
128 if offset != 0:
129 raise NotImplementedError(
129 raise NotImplementedError(
130 '_multifile only supports seeking to start, but that '
130 '_multifile only supports seeking to start, but that '
131 'could be fixed if you need it')
131 'could be fixed if you need it')
132 for f in self._fileobjs:
132 for f in self._fileobjs:
133 f.seek(0)
133 f.seek(0)
134 self._index = 0
134 self._index = 0
135
135
136 class httppeer(wireproto.wirepeer):
136 class httppeer(wireproto.wirepeer):
137 def __init__(self, ui, path, url, opener):
137 def __init__(self, ui, path, url, opener):
138 self._ui = ui
138 self.ui = ui
139 self._path = path
139 self._path = path
140 self._url = url
140 self._url = url
141 self._caps = None
141 self._caps = None
142 self._urlopener = opener
142 self._urlopener = opener
143 # This is its own attribute to facilitate extensions overriding
143 # This is its own attribute to facilitate extensions overriding
144 # the default type.
144 # the default type.
145 self._requestbuilder = urlreq.request
145 self._requestbuilder = urlreq.request
146
146
147 def __del__(self):
147 def __del__(self):
148 for h in self._urlopener.handlers:
148 for h in self._urlopener.handlers:
149 h.close()
149 h.close()
150 getattr(h, "close_all", lambda: None)()
150 getattr(h, "close_all", lambda: None)()
151
151
152 def _openurl(self, req):
152 def _openurl(self, req):
153 if (self._ui.debugflag
153 if (self.ui.debugflag
154 and self._ui.configbool('devel', 'debug.peer-request')):
154 and self.ui.configbool('devel', 'debug.peer-request')):
155 dbg = self._ui.debug
155 dbg = self.ui.debug
156 line = 'devel-peer-request: %s\n'
156 line = 'devel-peer-request: %s\n'
157 dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
157 dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
158 hgargssize = None
158 hgargssize = None
159
159
160 for header, value in sorted(req.header_items()):
160 for header, value in sorted(req.header_items()):
161 if header.startswith('X-hgarg-'):
161 if header.startswith('X-hgarg-'):
162 if hgargssize is None:
162 if hgargssize is None:
163 hgargssize = 0
163 hgargssize = 0
164 hgargssize += len(value)
164 hgargssize += len(value)
165 else:
165 else:
166 dbg(line % ' %s %s' % (header, value))
166 dbg(line % ' %s %s' % (header, value))
167
167
168 if hgargssize is not None:
168 if hgargssize is not None:
169 dbg(line % ' %d bytes of commands arguments in headers'
169 dbg(line % ' %d bytes of commands arguments in headers'
170 % hgargssize)
170 % hgargssize)
171
171
172 if req.has_data():
172 if req.has_data():
173 data = req.get_data()
173 data = req.get_data()
174 length = getattr(data, 'length', None)
174 length = getattr(data, 'length', None)
175 if length is None:
175 if length is None:
176 length = len(data)
176 length = len(data)
177 dbg(line % ' %d bytes of data' % length)
177 dbg(line % ' %d bytes of data' % length)
178
178
179 start = util.timer()
179 start = util.timer()
180
180
181 ret = self._urlopener.open(req)
181 ret = self._urlopener.open(req)
182 if self._ui.debugflag and self._ui.configbool('devel', 'debug.peer-request'):
182 if self.ui.debugflag and self.ui.configbool('devel', 'debug.peer-request'):
183 dbg(line % ' finished in %.4f seconds (%s)'
183 dbg(line % ' finished in %.4f seconds (%s)'
184 % (util.timer() - start, ret.code))
184 % (util.timer() - start, ret.code))
185 return ret
185 return ret
186
186
187 # Begin of ipeerconnection interface.
187 # Begin of ipeerconnection interface.
188
188
189 @util.propertycache
190 def ui(self):
191 return self._ui
192
193 def url(self):
189 def url(self):
194 return self._path
190 return self._path
195
191
196 def local(self):
192 def local(self):
197 return None
193 return None
198
194
199 def peer(self):
195 def peer(self):
200 return self
196 return self
201
197
202 def canpush(self):
198 def canpush(self):
203 return True
199 return True
204
200
205 def close(self):
201 def close(self):
206 pass
202 pass
207
203
208 # End of ipeerconnection interface.
204 # End of ipeerconnection interface.
209
205
210 # Begin of ipeercommands interface.
206 # Begin of ipeercommands interface.
211
207
212 def capabilities(self):
208 def capabilities(self):
213 # self._fetchcaps() should have been called as part of peer
209 # self._fetchcaps() should have been called as part of peer
214 # handshake. So self._caps should always be set.
210 # handshake. So self._caps should always be set.
215 assert self._caps is not None
211 assert self._caps is not None
216 return self._caps
212 return self._caps
217
213
218 # End of ipeercommands interface.
214 # End of ipeercommands interface.
219
215
220 # look up capabilities only when needed
216 # look up capabilities only when needed
221
217
222 def _fetchcaps(self):
218 def _fetchcaps(self):
223 self._caps = set(self._call('capabilities').split())
219 self._caps = set(self._call('capabilities').split())
224
220
225 def _callstream(self, cmd, _compressible=False, **args):
221 def _callstream(self, cmd, _compressible=False, **args):
226 args = pycompat.byteskwargs(args)
222 args = pycompat.byteskwargs(args)
227 if cmd == 'pushkey':
223 if cmd == 'pushkey':
228 args['data'] = ''
224 args['data'] = ''
229 data = args.pop('data', None)
225 data = args.pop('data', None)
230 headers = args.pop('headers', {})
226 headers = args.pop('headers', {})
231
227
232 self.ui.debug("sending %s command\n" % cmd)
228 self.ui.debug("sending %s command\n" % cmd)
233 q = [('cmd', cmd)]
229 q = [('cmd', cmd)]
234 headersize = 0
230 headersize = 0
235 varyheaders = []
231 varyheaders = []
236 # Important: don't use self.capable() here or else you end up
232 # Important: don't use self.capable() here or else you end up
237 # with infinite recursion when trying to look up capabilities
233 # with infinite recursion when trying to look up capabilities
238 # for the first time.
234 # for the first time.
239 postargsok = self._caps is not None and 'httppostargs' in self._caps
235 postargsok = self._caps is not None and 'httppostargs' in self._caps
240
236
241 # Send arguments via POST.
237 # Send arguments via POST.
242 if postargsok and args:
238 if postargsok and args:
243 strargs = urlreq.urlencode(sorted(args.items()))
239 strargs = urlreq.urlencode(sorted(args.items()))
244 if not data:
240 if not data:
245 data = strargs
241 data = strargs
246 else:
242 else:
247 if isinstance(data, bytes):
243 if isinstance(data, bytes):
248 i = io.BytesIO(data)
244 i = io.BytesIO(data)
249 i.length = len(data)
245 i.length = len(data)
250 data = i
246 data = i
251 argsio = io.BytesIO(strargs)
247 argsio = io.BytesIO(strargs)
252 argsio.length = len(strargs)
248 argsio.length = len(strargs)
253 data = _multifile(argsio, data)
249 data = _multifile(argsio, data)
254 headers[r'X-HgArgs-Post'] = len(strargs)
250 headers[r'X-HgArgs-Post'] = len(strargs)
255 elif args:
251 elif args:
256 # Calling self.capable() can infinite loop if we are calling
252 # Calling self.capable() can infinite loop if we are calling
257 # "capabilities". But that command should never accept wire
253 # "capabilities". But that command should never accept wire
258 # protocol arguments. So this should never happen.
254 # protocol arguments. So this should never happen.
259 assert cmd != 'capabilities'
255 assert cmd != 'capabilities'
260 httpheader = self.capable('httpheader')
256 httpheader = self.capable('httpheader')
261 if httpheader:
257 if httpheader:
262 headersize = int(httpheader.split(',', 1)[0])
258 headersize = int(httpheader.split(',', 1)[0])
263
259
264 # Send arguments via HTTP headers.
260 # Send arguments via HTTP headers.
265 if headersize > 0:
261 if headersize > 0:
266 # The headers can typically carry more data than the URL.
262 # The headers can typically carry more data than the URL.
267 encargs = urlreq.urlencode(sorted(args.items()))
263 encargs = urlreq.urlencode(sorted(args.items()))
268 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
264 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
269 headersize):
265 headersize):
270 headers[header] = value
266 headers[header] = value
271 varyheaders.append(header)
267 varyheaders.append(header)
272 # Send arguments via query string (Mercurial <1.9).
268 # Send arguments via query string (Mercurial <1.9).
273 else:
269 else:
274 q += sorted(args.items())
270 q += sorted(args.items())
275
271
276 qs = '?%s' % urlreq.urlencode(q)
272 qs = '?%s' % urlreq.urlencode(q)
277 cu = "%s%s" % (self._url, qs)
273 cu = "%s%s" % (self._url, qs)
278 size = 0
274 size = 0
279 if util.safehasattr(data, 'length'):
275 if util.safehasattr(data, 'length'):
280 size = data.length
276 size = data.length
281 elif data is not None:
277 elif data is not None:
282 size = len(data)
278 size = len(data)
283 if data is not None and r'Content-Type' not in headers:
279 if data is not None and r'Content-Type' not in headers:
284 headers[r'Content-Type'] = r'application/mercurial-0.1'
280 headers[r'Content-Type'] = r'application/mercurial-0.1'
285
281
286 # Tell the server we accept application/mercurial-0.2 and multiple
282 # Tell the server we accept application/mercurial-0.2 and multiple
287 # compression formats if the server is capable of emitting those
283 # compression formats if the server is capable of emitting those
288 # payloads.
284 # payloads.
289 protoparams = []
285 protoparams = []
290
286
291 mediatypes = set()
287 mediatypes = set()
292 if self._caps is not None:
288 if self._caps is not None:
293 mt = self.capable('httpmediatype')
289 mt = self.capable('httpmediatype')
294 if mt:
290 if mt:
295 protoparams.append('0.1')
291 protoparams.append('0.1')
296 mediatypes = set(mt.split(','))
292 mediatypes = set(mt.split(','))
297
293
298 if '0.2tx' in mediatypes:
294 if '0.2tx' in mediatypes:
299 protoparams.append('0.2')
295 protoparams.append('0.2')
300
296
301 if '0.2tx' in mediatypes and self.capable('compression'):
297 if '0.2tx' in mediatypes and self.capable('compression'):
302 # We /could/ compare supported compression formats and prune
298 # We /could/ compare supported compression formats and prune
303 # non-mutually supported or error if nothing is mutually supported.
299 # non-mutually supported or error if nothing is mutually supported.
304 # For now, send the full list to the server and have it error.
300 # For now, send the full list to the server and have it error.
305 comps = [e.wireprotosupport().name for e in
301 comps = [e.wireprotosupport().name for e in
306 util.compengines.supportedwireengines(util.CLIENTROLE)]
302 util.compengines.supportedwireengines(util.CLIENTROLE)]
307 protoparams.append('comp=%s' % ','.join(comps))
303 protoparams.append('comp=%s' % ','.join(comps))
308
304
309 if protoparams:
305 if protoparams:
310 protoheaders = encodevalueinheaders(' '.join(protoparams),
306 protoheaders = encodevalueinheaders(' '.join(protoparams),
311 'X-HgProto',
307 'X-HgProto',
312 headersize or 1024)
308 headersize or 1024)
313 for header, value in protoheaders:
309 for header, value in protoheaders:
314 headers[header] = value
310 headers[header] = value
315 varyheaders.append(header)
311 varyheaders.append(header)
316
312
317 if varyheaders:
313 if varyheaders:
318 headers[r'Vary'] = r','.join(varyheaders)
314 headers[r'Vary'] = r','.join(varyheaders)
319
315
320 req = self._requestbuilder(pycompat.strurl(cu), data, headers)
316 req = self._requestbuilder(pycompat.strurl(cu), data, headers)
321
317
322 if data is not None:
318 if data is not None:
323 self.ui.debug("sending %d bytes\n" % size)
319 self.ui.debug("sending %d bytes\n" % size)
324 req.add_unredirected_header(r'Content-Length', r'%d' % size)
320 req.add_unredirected_header(r'Content-Length', r'%d' % size)
325 try:
321 try:
326 resp = self._openurl(req)
322 resp = self._openurl(req)
327 except urlerr.httperror as inst:
323 except urlerr.httperror as inst:
328 if inst.code == 401:
324 if inst.code == 401:
329 raise error.Abort(_('authorization failed'))
325 raise error.Abort(_('authorization failed'))
330 raise
326 raise
331 except httplib.HTTPException as inst:
327 except httplib.HTTPException as inst:
332 self.ui.debug('http error while sending %s command\n' % cmd)
328 self.ui.debug('http error while sending %s command\n' % cmd)
333 self.ui.traceback()
329 self.ui.traceback()
334 raise IOError(None, inst)
330 raise IOError(None, inst)
335
331
336 # Insert error handlers for common I/O failures.
332 # Insert error handlers for common I/O failures.
337 _wraphttpresponse(resp)
333 _wraphttpresponse(resp)
338
334
339 # record the url we got redirected to
335 # record the url we got redirected to
340 resp_url = pycompat.bytesurl(resp.geturl())
336 resp_url = pycompat.bytesurl(resp.geturl())
341 if resp_url.endswith(qs):
337 if resp_url.endswith(qs):
342 resp_url = resp_url[:-len(qs)]
338 resp_url = resp_url[:-len(qs)]
343 if self._url.rstrip('/') != resp_url.rstrip('/'):
339 if self._url.rstrip('/') != resp_url.rstrip('/'):
344 if not self.ui.quiet:
340 if not self.ui.quiet:
345 self.ui.warn(_('real URL is %s\n') % resp_url)
341 self.ui.warn(_('real URL is %s\n') % resp_url)
346 self._url = resp_url
342 self._url = resp_url
347 try:
343 try:
348 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
344 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
349 except AttributeError:
345 except AttributeError:
350 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
346 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
351
347
352 safeurl = util.hidepassword(self._url)
348 safeurl = util.hidepassword(self._url)
353 if proto.startswith('application/hg-error'):
349 if proto.startswith('application/hg-error'):
354 raise error.OutOfBandError(resp.read())
350 raise error.OutOfBandError(resp.read())
355 # accept old "text/plain" and "application/hg-changegroup" for now
351 # accept old "text/plain" and "application/hg-changegroup" for now
356 if not (proto.startswith('application/mercurial-') or
352 if not (proto.startswith('application/mercurial-') or
357 (proto.startswith('text/plain')
353 (proto.startswith('text/plain')
358 and not resp.headers.get('content-length')) or
354 and not resp.headers.get('content-length')) or
359 proto.startswith('application/hg-changegroup')):
355 proto.startswith('application/hg-changegroup')):
360 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
356 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
361 raise error.RepoError(
357 raise error.RepoError(
362 _("'%s' does not appear to be an hg repository:\n"
358 _("'%s' does not appear to be an hg repository:\n"
363 "---%%<--- (%s)\n%s\n---%%<---\n")
359 "---%%<--- (%s)\n%s\n---%%<---\n")
364 % (safeurl, proto or 'no content-type', resp.read(1024)))
360 % (safeurl, proto or 'no content-type', resp.read(1024)))
365
361
366 if proto.startswith('application/mercurial-'):
362 if proto.startswith('application/mercurial-'):
367 try:
363 try:
368 version = proto.split('-', 1)[1]
364 version = proto.split('-', 1)[1]
369 version_info = tuple([int(n) for n in version.split('.')])
365 version_info = tuple([int(n) for n in version.split('.')])
370 except ValueError:
366 except ValueError:
371 raise error.RepoError(_("'%s' sent a broken Content-Type "
367 raise error.RepoError(_("'%s' sent a broken Content-Type "
372 "header (%s)") % (safeurl, proto))
368 "header (%s)") % (safeurl, proto))
373
369
374 # TODO consider switching to a decompression reader that uses
370 # TODO consider switching to a decompression reader that uses
375 # generators.
371 # generators.
376 if version_info == (0, 1):
372 if version_info == (0, 1):
377 if _compressible:
373 if _compressible:
378 return util.compengines['zlib'].decompressorreader(resp)
374 return util.compengines['zlib'].decompressorreader(resp)
379 return resp
375 return resp
380 elif version_info == (0, 2):
376 elif version_info == (0, 2):
381 # application/mercurial-0.2 always identifies the compression
377 # application/mercurial-0.2 always identifies the compression
382 # engine in the payload header.
378 # engine in the payload header.
383 elen = struct.unpack('B', resp.read(1))[0]
379 elen = struct.unpack('B', resp.read(1))[0]
384 ename = resp.read(elen)
380 ename = resp.read(elen)
385 engine = util.compengines.forwiretype(ename)
381 engine = util.compengines.forwiretype(ename)
386 return engine.decompressorreader(resp)
382 return engine.decompressorreader(resp)
387 else:
383 else:
388 raise error.RepoError(_("'%s' uses newer protocol %s") %
384 raise error.RepoError(_("'%s' uses newer protocol %s") %
389 (safeurl, version))
385 (safeurl, version))
390
386
391 if _compressible:
387 if _compressible:
392 return util.compengines['zlib'].decompressorreader(resp)
388 return util.compengines['zlib'].decompressorreader(resp)
393
389
394 return resp
390 return resp
395
391
396 def _call(self, cmd, **args):
392 def _call(self, cmd, **args):
397 fp = self._callstream(cmd, **args)
393 fp = self._callstream(cmd, **args)
398 try:
394 try:
399 return fp.read()
395 return fp.read()
400 finally:
396 finally:
401 # if using keepalive, allow connection to be reused
397 # if using keepalive, allow connection to be reused
402 fp.close()
398 fp.close()
403
399
404 def _callpush(self, cmd, cg, **args):
400 def _callpush(self, cmd, cg, **args):
405 # have to stream bundle to a temp file because we do not have
401 # have to stream bundle to a temp file because we do not have
406 # http 1.1 chunked transfer.
402 # http 1.1 chunked transfer.
407
403
408 types = self.capable('unbundle')
404 types = self.capable('unbundle')
409 try:
405 try:
410 types = types.split(',')
406 types = types.split(',')
411 except AttributeError:
407 except AttributeError:
412 # servers older than d1b16a746db6 will send 'unbundle' as a
408 # servers older than d1b16a746db6 will send 'unbundle' as a
413 # boolean capability. They only support headerless/uncompressed
409 # boolean capability. They only support headerless/uncompressed
414 # bundles.
410 # bundles.
415 types = [""]
411 types = [""]
416 for x in types:
412 for x in types:
417 if x in bundle2.bundletypes:
413 if x in bundle2.bundletypes:
418 type = x
414 type = x
419 break
415 break
420
416
421 tempname = bundle2.writebundle(self.ui, cg, None, type)
417 tempname = bundle2.writebundle(self.ui, cg, None, type)
422 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
418 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
423 headers = {r'Content-Type': r'application/mercurial-0.1'}
419 headers = {r'Content-Type': r'application/mercurial-0.1'}
424
420
425 try:
421 try:
426 r = self._call(cmd, data=fp, headers=headers, **args)
422 r = self._call(cmd, data=fp, headers=headers, **args)
427 vals = r.split('\n', 1)
423 vals = r.split('\n', 1)
428 if len(vals) < 2:
424 if len(vals) < 2:
429 raise error.ResponseError(_("unexpected response:"), r)
425 raise error.ResponseError(_("unexpected response:"), r)
430 return vals
426 return vals
431 except urlerr.httperror:
427 except urlerr.httperror:
432 # Catch and re-raise these so we don't try and treat them
428 # Catch and re-raise these so we don't try and treat them
433 # like generic socket errors. They lack any values in
429 # like generic socket errors. They lack any values in
434 # .args on Python 3 which breaks our socket.error block.
430 # .args on Python 3 which breaks our socket.error block.
435 raise
431 raise
436 except socket.error as err:
432 except socket.error as err:
437 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
433 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
438 raise error.Abort(_('push failed: %s') % err.args[1])
434 raise error.Abort(_('push failed: %s') % err.args[1])
439 raise error.Abort(err.args[1])
435 raise error.Abort(err.args[1])
440 finally:
436 finally:
441 fp.close()
437 fp.close()
442 os.unlink(tempname)
438 os.unlink(tempname)
443
439
444 def _calltwowaystream(self, cmd, fp, **args):
440 def _calltwowaystream(self, cmd, fp, **args):
445 fh = None
441 fh = None
446 fp_ = None
442 fp_ = None
447 filename = None
443 filename = None
448 try:
444 try:
449 # dump bundle to disk
445 # dump bundle to disk
450 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
446 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
451 fh = os.fdopen(fd, r"wb")
447 fh = os.fdopen(fd, r"wb")
452 d = fp.read(4096)
448 d = fp.read(4096)
453 while d:
449 while d:
454 fh.write(d)
450 fh.write(d)
455 d = fp.read(4096)
451 d = fp.read(4096)
456 fh.close()
452 fh.close()
457 # start http push
453 # start http push
458 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
454 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
459 headers = {r'Content-Type': r'application/mercurial-0.1'}
455 headers = {r'Content-Type': r'application/mercurial-0.1'}
460 return self._callstream(cmd, data=fp_, headers=headers, **args)
456 return self._callstream(cmd, data=fp_, headers=headers, **args)
461 finally:
457 finally:
462 if fp_ is not None:
458 if fp_ is not None:
463 fp_.close()
459 fp_.close()
464 if fh is not None:
460 if fh is not None:
465 fh.close()
461 fh.close()
466 os.unlink(filename)
462 os.unlink(filename)
467
463
468 def _callcompressable(self, cmd, **args):
464 def _callcompressable(self, cmd, **args):
469 return self._callstream(cmd, _compressible=True, **args)
465 return self._callstream(cmd, _compressible=True, **args)
470
466
471 def _abort(self, exception):
467 def _abort(self, exception):
472 raise exception
468 raise exception
473
469
474 def makepeer(ui, path):
470 def makepeer(ui, path):
475 u = util.url(path)
471 u = util.url(path)
476 if u.query or u.fragment:
472 if u.query or u.fragment:
477 raise error.Abort(_('unsupported URL component: "%s"') %
473 raise error.Abort(_('unsupported URL component: "%s"') %
478 (u.query or u.fragment))
474 (u.query or u.fragment))
479
475
480 # urllib cannot handle URLs with embedded user or passwd.
476 # urllib cannot handle URLs with embedded user or passwd.
481 url, authinfo = u.authinfo()
477 url, authinfo = u.authinfo()
482 ui.debug('using %s\n' % url)
478 ui.debug('using %s\n' % url)
483
479
484 opener = urlmod.opener(ui, authinfo)
480 opener = urlmod.opener(ui, authinfo)
485
481
486 return httppeer(ui, path, url, opener)
482 return httppeer(ui, path, url, opener)
487
483
488 def instance(ui, path, create):
484 def instance(ui, path, create):
489 if create:
485 if create:
490 raise error.Abort(_('cannot create new http repository'))
486 raise error.Abort(_('cannot create new http repository'))
491 try:
487 try:
492 if path.startswith('https:') and not urlmod.has_https:
488 if path.startswith('https:') and not urlmod.has_https:
493 raise error.Abort(_('Python support for SSL and HTTPS '
489 raise error.Abort(_('Python support for SSL and HTTPS '
494 'is not installed'))
490 'is not installed'))
495
491
496 inst = makepeer(ui, path)
492 inst = makepeer(ui, path)
497 inst._fetchcaps()
493 inst._fetchcaps()
498
494
499 return inst
495 return inst
500 except error.RepoError as httpexception:
496 except error.RepoError as httpexception:
501 try:
497 try:
502 r = statichttprepo.instance(ui, "static-" + path, create)
498 r = statichttprepo.instance(ui, "static-" + path, create)
503 ui.note(_('(falling back to static-http)\n'))
499 ui.note(_('(falling back to static-http)\n'))
504 return r
500 return r
505 except error.RepoError:
501 except error.RepoError:
506 raise httpexception # use the original http RepoError instead
502 raise httpexception # use the original http RepoError instead
@@ -1,2336 +1,2332 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 hex,
19 hex,
20 nullid,
20 nullid,
21 short,
21 short,
22 )
22 )
23 from .thirdparty.zope import (
23 from .thirdparty.zope import (
24 interface as zi,
24 interface as zi,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 peer,
52 peer,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 repository,
56 repository,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store,
62 store,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 )
69 )
70 from .utils import (
70 from .utils import (
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 release = lockmod.release
75 release = lockmod.release
76 urlerr = util.urlerr
76 urlerr = util.urlerr
77 urlreq = util.urlreq
77 urlreq = util.urlreq
78
78
79 # set of (path, vfs-location) tuples. vfs-location is:
79 # set of (path, vfs-location) tuples. vfs-location is:
80 # - 'plain' for vfs relative paths
80 # - 'plain' for vfs relative paths
81 # - '' for svfs relative paths
81 # - '' for svfs relative paths
82 _cachedfiles = set()
82 _cachedfiles = set()
83
83
84 class _basefilecache(scmutil.filecache):
84 class _basefilecache(scmutil.filecache):
85 """All filecache usage on repo are done for logic that should be unfiltered
85 """All filecache usage on repo are done for logic that should be unfiltered
86 """
86 """
87 def __get__(self, repo, type=None):
87 def __get__(self, repo, type=None):
88 if repo is None:
88 if repo is None:
89 return self
89 return self
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 def __set__(self, repo, value):
91 def __set__(self, repo, value):
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 def __delete__(self, repo):
93 def __delete__(self, repo):
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95
95
96 class repofilecache(_basefilecache):
96 class repofilecache(_basefilecache):
97 """filecache for files in .hg but outside of .hg/store"""
97 """filecache for files in .hg but outside of .hg/store"""
98 def __init__(self, *paths):
98 def __init__(self, *paths):
99 super(repofilecache, self).__init__(*paths)
99 super(repofilecache, self).__init__(*paths)
100 for path in paths:
100 for path in paths:
101 _cachedfiles.add((path, 'plain'))
101 _cachedfiles.add((path, 'plain'))
102
102
103 def join(self, obj, fname):
103 def join(self, obj, fname):
104 return obj.vfs.join(fname)
104 return obj.vfs.join(fname)
105
105
106 class storecache(_basefilecache):
106 class storecache(_basefilecache):
107 """filecache for files in the store"""
107 """filecache for files in the store"""
108 def __init__(self, *paths):
108 def __init__(self, *paths):
109 super(storecache, self).__init__(*paths)
109 super(storecache, self).__init__(*paths)
110 for path in paths:
110 for path in paths:
111 _cachedfiles.add((path, ''))
111 _cachedfiles.add((path, ''))
112
112
113 def join(self, obj, fname):
113 def join(self, obj, fname):
114 return obj.sjoin(fname)
114 return obj.sjoin(fname)
115
115
116 def isfilecached(repo, name):
116 def isfilecached(repo, name):
117 """check if a repo has already cached "name" filecache-ed property
117 """check if a repo has already cached "name" filecache-ed property
118
118
119 This returns (cachedobj-or-None, iscached) tuple.
119 This returns (cachedobj-or-None, iscached) tuple.
120 """
120 """
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 if not cacheentry:
122 if not cacheentry:
123 return None, False
123 return None, False
124 return cacheentry.obj, True
124 return cacheentry.obj, True
125
125
126 class unfilteredpropertycache(util.propertycache):
126 class unfilteredpropertycache(util.propertycache):
127 """propertycache that apply to unfiltered repo only"""
127 """propertycache that apply to unfiltered repo only"""
128
128
129 def __get__(self, repo, type=None):
129 def __get__(self, repo, type=None):
130 unfi = repo.unfiltered()
130 unfi = repo.unfiltered()
131 if unfi is repo:
131 if unfi is repo:
132 return super(unfilteredpropertycache, self).__get__(unfi)
132 return super(unfilteredpropertycache, self).__get__(unfi)
133 return getattr(unfi, self.name)
133 return getattr(unfi, self.name)
134
134
135 class filteredpropertycache(util.propertycache):
135 class filteredpropertycache(util.propertycache):
136 """propertycache that must take filtering in account"""
136 """propertycache that must take filtering in account"""
137
137
138 def cachevalue(self, obj, value):
138 def cachevalue(self, obj, value):
139 object.__setattr__(obj, self.name, value)
139 object.__setattr__(obj, self.name, value)
140
140
141
141
142 def hasunfilteredcache(repo, name):
142 def hasunfilteredcache(repo, name):
143 """check if a repo has an unfilteredpropertycache value for <name>"""
143 """check if a repo has an unfilteredpropertycache value for <name>"""
144 return name in vars(repo.unfiltered())
144 return name in vars(repo.unfiltered())
145
145
146 def unfilteredmethod(orig):
146 def unfilteredmethod(orig):
147 """decorate method that always need to be run on unfiltered version"""
147 """decorate method that always need to be run on unfiltered version"""
148 def wrapper(repo, *args, **kwargs):
148 def wrapper(repo, *args, **kwargs):
149 return orig(repo.unfiltered(), *args, **kwargs)
149 return orig(repo.unfiltered(), *args, **kwargs)
150 return wrapper
150 return wrapper
151
151
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 'unbundle'}
153 'unbundle'}
154 legacycaps = moderncaps.union({'changegroupsubset'})
154 legacycaps = moderncaps.union({'changegroupsubset'})
155
155
156 class localpeer(repository.peer):
156 class localpeer(repository.peer):
157 '''peer for a local repo; reflects only the most recent API'''
157 '''peer for a local repo; reflects only the most recent API'''
158
158
159 def __init__(self, repo, caps=None):
159 def __init__(self, repo, caps=None):
160 super(localpeer, self).__init__()
160 super(localpeer, self).__init__()
161
161
162 if caps is None:
162 if caps is None:
163 caps = moderncaps.copy()
163 caps = moderncaps.copy()
164 self._repo = repo.filtered('served')
164 self._repo = repo.filtered('served')
165 self._ui = repo.ui
165 self.ui = repo.ui
166 self._caps = repo._restrictcapabilities(caps)
166 self._caps = repo._restrictcapabilities(caps)
167
167
168 # Begin of _basepeer interface.
168 # Begin of _basepeer interface.
169
169
170 @util.propertycache
171 def ui(self):
172 return self._ui
173
174 def url(self):
170 def url(self):
175 return self._repo.url()
171 return self._repo.url()
176
172
177 def local(self):
173 def local(self):
178 return self._repo
174 return self._repo
179
175
180 def peer(self):
176 def peer(self):
181 return self
177 return self
182
178
183 def canpush(self):
179 def canpush(self):
184 return True
180 return True
185
181
186 def close(self):
182 def close(self):
187 self._repo.close()
183 self._repo.close()
188
184
189 # End of _basepeer interface.
185 # End of _basepeer interface.
190
186
191 # Begin of _basewirecommands interface.
187 # Begin of _basewirecommands interface.
192
188
193 def branchmap(self):
189 def branchmap(self):
194 return self._repo.branchmap()
190 return self._repo.branchmap()
195
191
196 def capabilities(self):
192 def capabilities(self):
197 return self._caps
193 return self._caps
198
194
199 def debugwireargs(self, one, two, three=None, four=None, five=None):
195 def debugwireargs(self, one, two, three=None, four=None, five=None):
200 """Used to test argument passing over the wire"""
196 """Used to test argument passing over the wire"""
201 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
197 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
202 pycompat.bytestr(four),
198 pycompat.bytestr(four),
203 pycompat.bytestr(five))
199 pycompat.bytestr(five))
204
200
205 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
201 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
206 **kwargs):
202 **kwargs):
207 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
203 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
208 common=common, bundlecaps=bundlecaps,
204 common=common, bundlecaps=bundlecaps,
209 **kwargs)[1]
205 **kwargs)[1]
210 cb = util.chunkbuffer(chunks)
206 cb = util.chunkbuffer(chunks)
211
207
212 if exchange.bundle2requested(bundlecaps):
208 if exchange.bundle2requested(bundlecaps):
213 # When requesting a bundle2, getbundle returns a stream to make the
209 # When requesting a bundle2, getbundle returns a stream to make the
214 # wire level function happier. We need to build a proper object
210 # wire level function happier. We need to build a proper object
215 # from it in local peer.
211 # from it in local peer.
216 return bundle2.getunbundler(self.ui, cb)
212 return bundle2.getunbundler(self.ui, cb)
217 else:
213 else:
218 return changegroup.getunbundler('01', cb, None)
214 return changegroup.getunbundler('01', cb, None)
219
215
220 def heads(self):
216 def heads(self):
221 return self._repo.heads()
217 return self._repo.heads()
222
218
223 def known(self, nodes):
219 def known(self, nodes):
224 return self._repo.known(nodes)
220 return self._repo.known(nodes)
225
221
226 def listkeys(self, namespace):
222 def listkeys(self, namespace):
227 return self._repo.listkeys(namespace)
223 return self._repo.listkeys(namespace)
228
224
229 def lookup(self, key):
225 def lookup(self, key):
230 return self._repo.lookup(key)
226 return self._repo.lookup(key)
231
227
232 def pushkey(self, namespace, key, old, new):
228 def pushkey(self, namespace, key, old, new):
233 return self._repo.pushkey(namespace, key, old, new)
229 return self._repo.pushkey(namespace, key, old, new)
234
230
235 def stream_out(self):
231 def stream_out(self):
236 raise error.Abort(_('cannot perform stream clone against local '
232 raise error.Abort(_('cannot perform stream clone against local '
237 'peer'))
233 'peer'))
238
234
239 def unbundle(self, cg, heads, url):
235 def unbundle(self, cg, heads, url):
240 """apply a bundle on a repo
236 """apply a bundle on a repo
241
237
242 This function handles the repo locking itself."""
238 This function handles the repo locking itself."""
243 try:
239 try:
244 try:
240 try:
245 cg = exchange.readbundle(self.ui, cg, None)
241 cg = exchange.readbundle(self.ui, cg, None)
246 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
242 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
247 if util.safehasattr(ret, 'getchunks'):
243 if util.safehasattr(ret, 'getchunks'):
248 # This is a bundle20 object, turn it into an unbundler.
244 # This is a bundle20 object, turn it into an unbundler.
249 # This little dance should be dropped eventually when the
245 # This little dance should be dropped eventually when the
250 # API is finally improved.
246 # API is finally improved.
251 stream = util.chunkbuffer(ret.getchunks())
247 stream = util.chunkbuffer(ret.getchunks())
252 ret = bundle2.getunbundler(self.ui, stream)
248 ret = bundle2.getunbundler(self.ui, stream)
253 return ret
249 return ret
254 except Exception as exc:
250 except Exception as exc:
255 # If the exception contains output salvaged from a bundle2
251 # If the exception contains output salvaged from a bundle2
256 # reply, we need to make sure it is printed before continuing
252 # reply, we need to make sure it is printed before continuing
257 # to fail. So we build a bundle2 with such output and consume
253 # to fail. So we build a bundle2 with such output and consume
258 # it directly.
254 # it directly.
259 #
255 #
260 # This is not very elegant but allows a "simple" solution for
256 # This is not very elegant but allows a "simple" solution for
261 # issue4594
257 # issue4594
262 output = getattr(exc, '_bundle2salvagedoutput', ())
258 output = getattr(exc, '_bundle2salvagedoutput', ())
263 if output:
259 if output:
264 bundler = bundle2.bundle20(self._repo.ui)
260 bundler = bundle2.bundle20(self._repo.ui)
265 for out in output:
261 for out in output:
266 bundler.addpart(out)
262 bundler.addpart(out)
267 stream = util.chunkbuffer(bundler.getchunks())
263 stream = util.chunkbuffer(bundler.getchunks())
268 b = bundle2.getunbundler(self.ui, stream)
264 b = bundle2.getunbundler(self.ui, stream)
269 bundle2.processbundle(self._repo, b)
265 bundle2.processbundle(self._repo, b)
270 raise
266 raise
271 except error.PushRaced as exc:
267 except error.PushRaced as exc:
272 raise error.ResponseError(_('push failed:'),
268 raise error.ResponseError(_('push failed:'),
273 stringutil.forcebytestr(exc))
269 stringutil.forcebytestr(exc))
274
270
275 # End of _basewirecommands interface.
271 # End of _basewirecommands interface.
276
272
277 # Begin of peer interface.
273 # Begin of peer interface.
278
274
279 def iterbatch(self):
275 def iterbatch(self):
280 return peer.localiterbatcher(self)
276 return peer.localiterbatcher(self)
281
277
282 # End of peer interface.
278 # End of peer interface.
283
279
284 class locallegacypeer(repository.legacypeer, localpeer):
280 class locallegacypeer(repository.legacypeer, localpeer):
285 '''peer extension which implements legacy methods too; used for tests with
281 '''peer extension which implements legacy methods too; used for tests with
286 restricted capabilities'''
282 restricted capabilities'''
287
283
288 def __init__(self, repo):
284 def __init__(self, repo):
289 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
285 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
290
286
291 # Begin of baselegacywirecommands interface.
287 # Begin of baselegacywirecommands interface.
292
288
293 def between(self, pairs):
289 def between(self, pairs):
294 return self._repo.between(pairs)
290 return self._repo.between(pairs)
295
291
296 def branches(self, nodes):
292 def branches(self, nodes):
297 return self._repo.branches(nodes)
293 return self._repo.branches(nodes)
298
294
299 def changegroup(self, basenodes, source):
295 def changegroup(self, basenodes, source):
300 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
296 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
301 missingheads=self._repo.heads())
297 missingheads=self._repo.heads())
302 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
303
299
304 def changegroupsubset(self, bases, heads, source):
300 def changegroupsubset(self, bases, heads, source):
305 outgoing = discovery.outgoing(self._repo, missingroots=bases,
301 outgoing = discovery.outgoing(self._repo, missingroots=bases,
306 missingheads=heads)
302 missingheads=heads)
307 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
303 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
308
304
309 # End of baselegacywirecommands interface.
305 # End of baselegacywirecommands interface.
310
306
311 # Increment the sub-version when the revlog v2 format changes to lock out old
307 # Increment the sub-version when the revlog v2 format changes to lock out old
312 # clients.
308 # clients.
313 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
309 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
314
310
315 # Functions receiving (ui, features) that extensions can register to impact
311 # Functions receiving (ui, features) that extensions can register to impact
316 # the ability to load repositories with custom requirements. Only
312 # the ability to load repositories with custom requirements. Only
317 # functions defined in loaded extensions are called.
313 # functions defined in loaded extensions are called.
318 #
314 #
319 # The function receives a set of requirement strings that the repository
315 # The function receives a set of requirement strings that the repository
320 # is capable of opening. Functions will typically add elements to the
316 # is capable of opening. Functions will typically add elements to the
321 # set to reflect that the extension knows how to handle those requirements.
317 # set to reflect that the extension knows how to handle those requirements.
322 featuresetupfuncs = set()
318 featuresetupfuncs = set()
323
319
324 @zi.implementer(repository.completelocalrepository)
320 @zi.implementer(repository.completelocalrepository)
325 class localrepository(object):
321 class localrepository(object):
326
322
327 # obsolete experimental requirements:
323 # obsolete experimental requirements:
328 # - manifestv2: An experimental new manifest format that allowed
324 # - manifestv2: An experimental new manifest format that allowed
329 # for stem compression of long paths. Experiment ended up not
325 # for stem compression of long paths. Experiment ended up not
330 # being successful (repository sizes went up due to worse delta
326 # being successful (repository sizes went up due to worse delta
331 # chains), and the code was deleted in 4.6.
327 # chains), and the code was deleted in 4.6.
332 supportedformats = {
328 supportedformats = {
333 'revlogv1',
329 'revlogv1',
334 'generaldelta',
330 'generaldelta',
335 'treemanifest',
331 'treemanifest',
336 REVLOGV2_REQUIREMENT,
332 REVLOGV2_REQUIREMENT,
337 }
333 }
338 _basesupported = supportedformats | {
334 _basesupported = supportedformats | {
339 'store',
335 'store',
340 'fncache',
336 'fncache',
341 'shared',
337 'shared',
342 'relshared',
338 'relshared',
343 'dotencode',
339 'dotencode',
344 'exp-sparse',
340 'exp-sparse',
345 }
341 }
346 openerreqs = {
342 openerreqs = {
347 'revlogv1',
343 'revlogv1',
348 'generaldelta',
344 'generaldelta',
349 'treemanifest',
345 'treemanifest',
350 }
346 }
351
347
352 # list of prefixes for files which can be written without 'wlock'
348 # list of prefixes for files which can be written without 'wlock'
353 # Extensions should extend this list when needed
349 # Extensions should extend this list when needed
354 _wlockfreeprefix = {
350 _wlockfreeprefix = {
355 # We might consider requiring 'wlock' for the next
351 # We might consider requiring 'wlock' for the next
356 # two, but pretty much all the existing code assumes
352 # two, but pretty much all the existing code assumes
357 # wlock is not needed so we keep them excluded for
353 # wlock is not needed so we keep them excluded for
358 # now.
354 # now.
359 'hgrc',
355 'hgrc',
360 'requires',
356 'requires',
361 # XXX cache is a complicated business someone
357 # XXX cache is a complicated business someone
362 # should investigate this in depth at some point
358 # should investigate this in depth at some point
363 'cache/',
359 'cache/',
364 # XXX shouldn't be dirstate covered by the wlock?
360 # XXX shouldn't be dirstate covered by the wlock?
365 'dirstate',
361 'dirstate',
366 # XXX bisect was still a bit too messy at the time
362 # XXX bisect was still a bit too messy at the time
367 # this changeset was introduced. Someone should fix
363 # this changeset was introduced. Someone should fix
368 # the remaining bit and drop this line
364 # the remaining bit and drop this line
369 'bisect.state',
365 'bisect.state',
370 }
366 }
371
367
372 def __init__(self, baseui, path, create=False):
368 def __init__(self, baseui, path, create=False):
373 self.requirements = set()
369 self.requirements = set()
374 self.filtername = None
370 self.filtername = None
375 # wvfs: rooted at the repository root, used to access the working copy
371 # wvfs: rooted at the repository root, used to access the working copy
376 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
372 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
377 # vfs: rooted at .hg, used to access repo files outside of .hg/store
373 # vfs: rooted at .hg, used to access repo files outside of .hg/store
378 self.vfs = None
374 self.vfs = None
379 # svfs: usually rooted at .hg/store, used to access repository history
375 # svfs: usually rooted at .hg/store, used to access repository history
380 # If this is a shared repository, this vfs may point to another
376 # If this is a shared repository, this vfs may point to another
381 # repository's .hg/store directory.
377 # repository's .hg/store directory.
382 self.svfs = None
378 self.svfs = None
383 self.root = self.wvfs.base
379 self.root = self.wvfs.base
384 self.path = self.wvfs.join(".hg")
380 self.path = self.wvfs.join(".hg")
385 self.origroot = path
381 self.origroot = path
386 # This is only used by context.workingctx.match in order to
382 # This is only used by context.workingctx.match in order to
387 # detect files in subrepos.
383 # detect files in subrepos.
388 self.auditor = pathutil.pathauditor(
384 self.auditor = pathutil.pathauditor(
389 self.root, callback=self._checknested)
385 self.root, callback=self._checknested)
390 # This is only used by context.basectx.match in order to detect
386 # This is only used by context.basectx.match in order to detect
391 # files in subrepos.
387 # files in subrepos.
392 self.nofsauditor = pathutil.pathauditor(
388 self.nofsauditor = pathutil.pathauditor(
393 self.root, callback=self._checknested, realfs=False, cached=True)
389 self.root, callback=self._checknested, realfs=False, cached=True)
394 self.baseui = baseui
390 self.baseui = baseui
395 self.ui = baseui.copy()
391 self.ui = baseui.copy()
396 self.ui.copy = baseui.copy # prevent copying repo configuration
392 self.ui.copy = baseui.copy # prevent copying repo configuration
397 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
393 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
398 if (self.ui.configbool('devel', 'all-warnings') or
394 if (self.ui.configbool('devel', 'all-warnings') or
399 self.ui.configbool('devel', 'check-locks')):
395 self.ui.configbool('devel', 'check-locks')):
400 self.vfs.audit = self._getvfsward(self.vfs.audit)
396 self.vfs.audit = self._getvfsward(self.vfs.audit)
401 # A list of callbacks to shape the phase if no data were found.
397 # A list of callbacks to shape the phase if no data were found.
402 # Callbacks are in the form: func(repo, roots) --> processed root.
398 # Callbacks are in the form: func(repo, roots) --> processed root.
403 # This list is to be filled by extensions during repo setup
399 # This list is to be filled by extensions during repo setup
404 self._phasedefaults = []
400 self._phasedefaults = []
405 try:
401 try:
406 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
402 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
407 self._loadextensions()
403 self._loadextensions()
408 except IOError:
404 except IOError:
409 pass
405 pass
410
406
411 if featuresetupfuncs:
407 if featuresetupfuncs:
412 self.supported = set(self._basesupported) # use private copy
408 self.supported = set(self._basesupported) # use private copy
413 extmods = set(m.__name__ for n, m
409 extmods = set(m.__name__ for n, m
414 in extensions.extensions(self.ui))
410 in extensions.extensions(self.ui))
415 for setupfunc in featuresetupfuncs:
411 for setupfunc in featuresetupfuncs:
416 if setupfunc.__module__ in extmods:
412 if setupfunc.__module__ in extmods:
417 setupfunc(self.ui, self.supported)
413 setupfunc(self.ui, self.supported)
418 else:
414 else:
419 self.supported = self._basesupported
415 self.supported = self._basesupported
420 color.setup(self.ui)
416 color.setup(self.ui)
421
417
422 # Add compression engines.
418 # Add compression engines.
423 for name in util.compengines:
419 for name in util.compengines:
424 engine = util.compengines[name]
420 engine = util.compengines[name]
425 if engine.revlogheader():
421 if engine.revlogheader():
426 self.supported.add('exp-compression-%s' % name)
422 self.supported.add('exp-compression-%s' % name)
427
423
428 if not self.vfs.isdir():
424 if not self.vfs.isdir():
429 if create:
425 if create:
430 self.requirements = newreporequirements(self)
426 self.requirements = newreporequirements(self)
431
427
432 if not self.wvfs.exists():
428 if not self.wvfs.exists():
433 self.wvfs.makedirs()
429 self.wvfs.makedirs()
434 self.vfs.makedir(notindexed=True)
430 self.vfs.makedir(notindexed=True)
435
431
436 if 'store' in self.requirements:
432 if 'store' in self.requirements:
437 self.vfs.mkdir("store")
433 self.vfs.mkdir("store")
438
434
439 # create an invalid changelog
435 # create an invalid changelog
440 self.vfs.append(
436 self.vfs.append(
441 "00changelog.i",
437 "00changelog.i",
442 '\0\0\0\2' # represents revlogv2
438 '\0\0\0\2' # represents revlogv2
443 ' dummy changelog to prevent using the old repo layout'
439 ' dummy changelog to prevent using the old repo layout'
444 )
440 )
445 else:
441 else:
446 raise error.RepoError(_("repository %s not found") % path)
442 raise error.RepoError(_("repository %s not found") % path)
447 elif create:
443 elif create:
448 raise error.RepoError(_("repository %s already exists") % path)
444 raise error.RepoError(_("repository %s already exists") % path)
449 else:
445 else:
450 try:
446 try:
451 self.requirements = scmutil.readrequires(
447 self.requirements = scmutil.readrequires(
452 self.vfs, self.supported)
448 self.vfs, self.supported)
453 except IOError as inst:
449 except IOError as inst:
454 if inst.errno != errno.ENOENT:
450 if inst.errno != errno.ENOENT:
455 raise
451 raise
456
452
457 cachepath = self.vfs.join('cache')
453 cachepath = self.vfs.join('cache')
458 self.sharedpath = self.path
454 self.sharedpath = self.path
459 try:
455 try:
460 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
456 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
461 if 'relshared' in self.requirements:
457 if 'relshared' in self.requirements:
462 sharedpath = self.vfs.join(sharedpath)
458 sharedpath = self.vfs.join(sharedpath)
463 vfs = vfsmod.vfs(sharedpath, realpath=True)
459 vfs = vfsmod.vfs(sharedpath, realpath=True)
464 cachepath = vfs.join('cache')
460 cachepath = vfs.join('cache')
465 s = vfs.base
461 s = vfs.base
466 if not vfs.exists():
462 if not vfs.exists():
467 raise error.RepoError(
463 raise error.RepoError(
468 _('.hg/sharedpath points to nonexistent directory %s') % s)
464 _('.hg/sharedpath points to nonexistent directory %s') % s)
469 self.sharedpath = s
465 self.sharedpath = s
470 except IOError as inst:
466 except IOError as inst:
471 if inst.errno != errno.ENOENT:
467 if inst.errno != errno.ENOENT:
472 raise
468 raise
473
469
474 if 'exp-sparse' in self.requirements and not sparse.enabled:
470 if 'exp-sparse' in self.requirements and not sparse.enabled:
475 raise error.RepoError(_('repository is using sparse feature but '
471 raise error.RepoError(_('repository is using sparse feature but '
476 'sparse is not enabled; enable the '
472 'sparse is not enabled; enable the '
477 '"sparse" extensions to access'))
473 '"sparse" extensions to access'))
478
474
479 self.store = store.store(
475 self.store = store.store(
480 self.requirements, self.sharedpath,
476 self.requirements, self.sharedpath,
481 lambda base: vfsmod.vfs(base, cacheaudited=True))
477 lambda base: vfsmod.vfs(base, cacheaudited=True))
482 self.spath = self.store.path
478 self.spath = self.store.path
483 self.svfs = self.store.vfs
479 self.svfs = self.store.vfs
484 self.sjoin = self.store.join
480 self.sjoin = self.store.join
485 self.vfs.createmode = self.store.createmode
481 self.vfs.createmode = self.store.createmode
486 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
482 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
487 self.cachevfs.createmode = self.store.createmode
483 self.cachevfs.createmode = self.store.createmode
488 if (self.ui.configbool('devel', 'all-warnings') or
484 if (self.ui.configbool('devel', 'all-warnings') or
489 self.ui.configbool('devel', 'check-locks')):
485 self.ui.configbool('devel', 'check-locks')):
490 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
486 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
491 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
487 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
492 else: # standard vfs
488 else: # standard vfs
493 self.svfs.audit = self._getsvfsward(self.svfs.audit)
489 self.svfs.audit = self._getsvfsward(self.svfs.audit)
494 self._applyopenerreqs()
490 self._applyopenerreqs()
495 if create:
491 if create:
496 self._writerequirements()
492 self._writerequirements()
497
493
498 self._dirstatevalidatewarned = False
494 self._dirstatevalidatewarned = False
499
495
500 self._branchcaches = {}
496 self._branchcaches = {}
501 self._revbranchcache = None
497 self._revbranchcache = None
502 self._filterpats = {}
498 self._filterpats = {}
503 self._datafilters = {}
499 self._datafilters = {}
504 self._transref = self._lockref = self._wlockref = None
500 self._transref = self._lockref = self._wlockref = None
505
501
506 # A cache for various files under .hg/ that tracks file changes,
502 # A cache for various files under .hg/ that tracks file changes,
507 # (used by the filecache decorator)
503 # (used by the filecache decorator)
508 #
504 #
509 # Maps a property name to its util.filecacheentry
505 # Maps a property name to its util.filecacheentry
510 self._filecache = {}
506 self._filecache = {}
511
507
512 # holds sets of revisions to be filtered
508 # holds sets of revisions to be filtered
513 # should be cleared when something might have changed the filter value:
509 # should be cleared when something might have changed the filter value:
514 # - new changesets,
510 # - new changesets,
515 # - phase change,
511 # - phase change,
516 # - new obsolescence marker,
512 # - new obsolescence marker,
517 # - working directory parent change,
513 # - working directory parent change,
518 # - bookmark changes
514 # - bookmark changes
519 self.filteredrevcache = {}
515 self.filteredrevcache = {}
520
516
521 # post-dirstate-status hooks
517 # post-dirstate-status hooks
522 self._postdsstatus = []
518 self._postdsstatus = []
523
519
524 # generic mapping between names and nodes
520 # generic mapping between names and nodes
525 self.names = namespaces.namespaces()
521 self.names = namespaces.namespaces()
526
522
527 # Key to signature value.
523 # Key to signature value.
528 self._sparsesignaturecache = {}
524 self._sparsesignaturecache = {}
529 # Signature to cached matcher instance.
525 # Signature to cached matcher instance.
530 self._sparsematchercache = {}
526 self._sparsematchercache = {}
531
527
532 def _getvfsward(self, origfunc):
528 def _getvfsward(self, origfunc):
533 """build a ward for self.vfs"""
529 """build a ward for self.vfs"""
534 rref = weakref.ref(self)
530 rref = weakref.ref(self)
535 def checkvfs(path, mode=None):
531 def checkvfs(path, mode=None):
536 ret = origfunc(path, mode=mode)
532 ret = origfunc(path, mode=mode)
537 repo = rref()
533 repo = rref()
538 if (repo is None
534 if (repo is None
539 or not util.safehasattr(repo, '_wlockref')
535 or not util.safehasattr(repo, '_wlockref')
540 or not util.safehasattr(repo, '_lockref')):
536 or not util.safehasattr(repo, '_lockref')):
541 return
537 return
542 if mode in (None, 'r', 'rb'):
538 if mode in (None, 'r', 'rb'):
543 return
539 return
544 if path.startswith(repo.path):
540 if path.startswith(repo.path):
545 # truncate name relative to the repository (.hg)
541 # truncate name relative to the repository (.hg)
546 path = path[len(repo.path) + 1:]
542 path = path[len(repo.path) + 1:]
547 if path.startswith('cache/'):
543 if path.startswith('cache/'):
548 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
544 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
549 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
545 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
550 if path.startswith('journal.'):
546 if path.startswith('journal.'):
551 # journal is covered by 'lock'
547 # journal is covered by 'lock'
552 if repo._currentlock(repo._lockref) is None:
548 if repo._currentlock(repo._lockref) is None:
553 repo.ui.develwarn('write with no lock: "%s"' % path,
549 repo.ui.develwarn('write with no lock: "%s"' % path,
554 stacklevel=2, config='check-locks')
550 stacklevel=2, config='check-locks')
555 elif repo._currentlock(repo._wlockref) is None:
551 elif repo._currentlock(repo._wlockref) is None:
556 # rest of vfs files are covered by 'wlock'
552 # rest of vfs files are covered by 'wlock'
557 #
553 #
558 # exclude special files
554 # exclude special files
559 for prefix in self._wlockfreeprefix:
555 for prefix in self._wlockfreeprefix:
560 if path.startswith(prefix):
556 if path.startswith(prefix):
561 return
557 return
562 repo.ui.develwarn('write with no wlock: "%s"' % path,
558 repo.ui.develwarn('write with no wlock: "%s"' % path,
563 stacklevel=2, config='check-locks')
559 stacklevel=2, config='check-locks')
564 return ret
560 return ret
565 return checkvfs
561 return checkvfs
566
562
567 def _getsvfsward(self, origfunc):
563 def _getsvfsward(self, origfunc):
568 """build a ward for self.svfs"""
564 """build a ward for self.svfs"""
569 rref = weakref.ref(self)
565 rref = weakref.ref(self)
570 def checksvfs(path, mode=None):
566 def checksvfs(path, mode=None):
571 ret = origfunc(path, mode=mode)
567 ret = origfunc(path, mode=mode)
572 repo = rref()
568 repo = rref()
573 if repo is None or not util.safehasattr(repo, '_lockref'):
569 if repo is None or not util.safehasattr(repo, '_lockref'):
574 return
570 return
575 if mode in (None, 'r', 'rb'):
571 if mode in (None, 'r', 'rb'):
576 return
572 return
577 if path.startswith(repo.sharedpath):
573 if path.startswith(repo.sharedpath):
578 # truncate name relative to the repository (.hg)
574 # truncate name relative to the repository (.hg)
579 path = path[len(repo.sharedpath) + 1:]
575 path = path[len(repo.sharedpath) + 1:]
580 if repo._currentlock(repo._lockref) is None:
576 if repo._currentlock(repo._lockref) is None:
581 repo.ui.develwarn('write with no lock: "%s"' % path,
577 repo.ui.develwarn('write with no lock: "%s"' % path,
582 stacklevel=3)
578 stacklevel=3)
583 return ret
579 return ret
584 return checksvfs
580 return checksvfs
585
581
586 def close(self):
582 def close(self):
587 self._writecaches()
583 self._writecaches()
588
584
589 def _loadextensions(self):
585 def _loadextensions(self):
590 extensions.loadall(self.ui)
586 extensions.loadall(self.ui)
591
587
592 def _writecaches(self):
588 def _writecaches(self):
593 if self._revbranchcache:
589 if self._revbranchcache:
594 self._revbranchcache.write()
590 self._revbranchcache.write()
595
591
596 def _restrictcapabilities(self, caps):
592 def _restrictcapabilities(self, caps):
597 if self.ui.configbool('experimental', 'bundle2-advertise'):
593 if self.ui.configbool('experimental', 'bundle2-advertise'):
598 caps = set(caps)
594 caps = set(caps)
599 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
595 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
600 role='client'))
596 role='client'))
601 caps.add('bundle2=' + urlreq.quote(capsblob))
597 caps.add('bundle2=' + urlreq.quote(capsblob))
602 return caps
598 return caps
603
599
604 def _applyopenerreqs(self):
600 def _applyopenerreqs(self):
605 self.svfs.options = dict((r, 1) for r in self.requirements
601 self.svfs.options = dict((r, 1) for r in self.requirements
606 if r in self.openerreqs)
602 if r in self.openerreqs)
607 # experimental config: format.chunkcachesize
603 # experimental config: format.chunkcachesize
608 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
604 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
609 if chunkcachesize is not None:
605 if chunkcachesize is not None:
610 self.svfs.options['chunkcachesize'] = chunkcachesize
606 self.svfs.options['chunkcachesize'] = chunkcachesize
611 # experimental config: format.maxchainlen
607 # experimental config: format.maxchainlen
612 maxchainlen = self.ui.configint('format', 'maxchainlen')
608 maxchainlen = self.ui.configint('format', 'maxchainlen')
613 if maxchainlen is not None:
609 if maxchainlen is not None:
614 self.svfs.options['maxchainlen'] = maxchainlen
610 self.svfs.options['maxchainlen'] = maxchainlen
615 # experimental config: format.manifestcachesize
611 # experimental config: format.manifestcachesize
616 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
612 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
617 if manifestcachesize is not None:
613 if manifestcachesize is not None:
618 self.svfs.options['manifestcachesize'] = manifestcachesize
614 self.svfs.options['manifestcachesize'] = manifestcachesize
619 # experimental config: format.aggressivemergedeltas
615 # experimental config: format.aggressivemergedeltas
620 aggressivemergedeltas = self.ui.configbool('format',
616 aggressivemergedeltas = self.ui.configbool('format',
621 'aggressivemergedeltas')
617 'aggressivemergedeltas')
622 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
618 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
623 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
619 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
624 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
620 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
625 if 0 <= chainspan:
621 if 0 <= chainspan:
626 self.svfs.options['maxdeltachainspan'] = chainspan
622 self.svfs.options['maxdeltachainspan'] = chainspan
627 mmapindexthreshold = self.ui.configbytes('experimental',
623 mmapindexthreshold = self.ui.configbytes('experimental',
628 'mmapindexthreshold')
624 'mmapindexthreshold')
629 if mmapindexthreshold is not None:
625 if mmapindexthreshold is not None:
630 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
626 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
631 withsparseread = self.ui.configbool('experimental', 'sparse-read')
627 withsparseread = self.ui.configbool('experimental', 'sparse-read')
632 srdensitythres = float(self.ui.config('experimental',
628 srdensitythres = float(self.ui.config('experimental',
633 'sparse-read.density-threshold'))
629 'sparse-read.density-threshold'))
634 srmingapsize = self.ui.configbytes('experimental',
630 srmingapsize = self.ui.configbytes('experimental',
635 'sparse-read.min-gap-size')
631 'sparse-read.min-gap-size')
636 self.svfs.options['with-sparse-read'] = withsparseread
632 self.svfs.options['with-sparse-read'] = withsparseread
637 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
633 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
638 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
634 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
639
635
640 for r in self.requirements:
636 for r in self.requirements:
641 if r.startswith('exp-compression-'):
637 if r.startswith('exp-compression-'):
642 self.svfs.options['compengine'] = r[len('exp-compression-'):]
638 self.svfs.options['compengine'] = r[len('exp-compression-'):]
643
639
644 # TODO move "revlogv2" to openerreqs once finalized.
640 # TODO move "revlogv2" to openerreqs once finalized.
645 if REVLOGV2_REQUIREMENT in self.requirements:
641 if REVLOGV2_REQUIREMENT in self.requirements:
646 self.svfs.options['revlogv2'] = True
642 self.svfs.options['revlogv2'] = True
647
643
648 def _writerequirements(self):
644 def _writerequirements(self):
649 scmutil.writerequires(self.vfs, self.requirements)
645 scmutil.writerequires(self.vfs, self.requirements)
650
646
651 def _checknested(self, path):
647 def _checknested(self, path):
652 """Determine if path is a legal nested repository."""
648 """Determine if path is a legal nested repository."""
653 if not path.startswith(self.root):
649 if not path.startswith(self.root):
654 return False
650 return False
655 subpath = path[len(self.root) + 1:]
651 subpath = path[len(self.root) + 1:]
656 normsubpath = util.pconvert(subpath)
652 normsubpath = util.pconvert(subpath)
657
653
658 # XXX: Checking against the current working copy is wrong in
654 # XXX: Checking against the current working copy is wrong in
659 # the sense that it can reject things like
655 # the sense that it can reject things like
660 #
656 #
661 # $ hg cat -r 10 sub/x.txt
657 # $ hg cat -r 10 sub/x.txt
662 #
658 #
663 # if sub/ is no longer a subrepository in the working copy
659 # if sub/ is no longer a subrepository in the working copy
664 # parent revision.
660 # parent revision.
665 #
661 #
666 # However, it can of course also allow things that would have
662 # However, it can of course also allow things that would have
667 # been rejected before, such as the above cat command if sub/
663 # been rejected before, such as the above cat command if sub/
668 # is a subrepository now, but was a normal directory before.
664 # is a subrepository now, but was a normal directory before.
669 # The old path auditor would have rejected by mistake since it
665 # The old path auditor would have rejected by mistake since it
670 # panics when it sees sub/.hg/.
666 # panics when it sees sub/.hg/.
671 #
667 #
672 # All in all, checking against the working copy seems sensible
668 # All in all, checking against the working copy seems sensible
673 # since we want to prevent access to nested repositories on
669 # since we want to prevent access to nested repositories on
674 # the filesystem *now*.
670 # the filesystem *now*.
675 ctx = self[None]
671 ctx = self[None]
676 parts = util.splitpath(subpath)
672 parts = util.splitpath(subpath)
677 while parts:
673 while parts:
678 prefix = '/'.join(parts)
674 prefix = '/'.join(parts)
679 if prefix in ctx.substate:
675 if prefix in ctx.substate:
680 if prefix == normsubpath:
676 if prefix == normsubpath:
681 return True
677 return True
682 else:
678 else:
683 sub = ctx.sub(prefix)
679 sub = ctx.sub(prefix)
684 return sub.checknested(subpath[len(prefix) + 1:])
680 return sub.checknested(subpath[len(prefix) + 1:])
685 else:
681 else:
686 parts.pop()
682 parts.pop()
687 return False
683 return False
688
684
689 def peer(self):
685 def peer(self):
690 return localpeer(self) # not cached to avoid reference cycle
686 return localpeer(self) # not cached to avoid reference cycle
691
687
692 def unfiltered(self):
688 def unfiltered(self):
693 """Return unfiltered version of the repository
689 """Return unfiltered version of the repository
694
690
695 Intended to be overwritten by filtered repo."""
691 Intended to be overwritten by filtered repo."""
696 return self
692 return self
697
693
698 def filtered(self, name, visibilityexceptions=None):
694 def filtered(self, name, visibilityexceptions=None):
699 """Return a filtered version of a repository"""
695 """Return a filtered version of a repository"""
700 cls = repoview.newtype(self.unfiltered().__class__)
696 cls = repoview.newtype(self.unfiltered().__class__)
701 return cls(self, name, visibilityexceptions)
697 return cls(self, name, visibilityexceptions)
702
698
703 @repofilecache('bookmarks', 'bookmarks.current')
699 @repofilecache('bookmarks', 'bookmarks.current')
704 def _bookmarks(self):
700 def _bookmarks(self):
705 return bookmarks.bmstore(self)
701 return bookmarks.bmstore(self)
706
702
707 @property
703 @property
708 def _activebookmark(self):
704 def _activebookmark(self):
709 return self._bookmarks.active
705 return self._bookmarks.active
710
706
711 # _phasesets depend on the changelog. What we need is to call
707 # _phasesets depend on the changelog. What we need is to call
712 # _phasecache.invalidate() if '00changelog.i' was changed, but it
708 # _phasecache.invalidate() if '00changelog.i' was changed, but it
713 # can't be easily expressed in the filecache mechanism.
709 # can't be easily expressed in the filecache mechanism.
714 @storecache('phaseroots', '00changelog.i')
710 @storecache('phaseroots', '00changelog.i')
715 def _phasecache(self):
711 def _phasecache(self):
716 return phases.phasecache(self, self._phasedefaults)
712 return phases.phasecache(self, self._phasedefaults)
717
713
718 @storecache('obsstore')
714 @storecache('obsstore')
719 def obsstore(self):
715 def obsstore(self):
720 return obsolete.makestore(self.ui, self)
716 return obsolete.makestore(self.ui, self)
721
717
722 @storecache('00changelog.i')
718 @storecache('00changelog.i')
723 def changelog(self):
719 def changelog(self):
724 return changelog.changelog(self.svfs,
720 return changelog.changelog(self.svfs,
725 trypending=txnutil.mayhavepending(self.root))
721 trypending=txnutil.mayhavepending(self.root))
726
722
727 def _constructmanifest(self):
723 def _constructmanifest(self):
728 # This is a temporary function while we migrate from manifest to
724 # This is a temporary function while we migrate from manifest to
729 # manifestlog. It allows bundlerepo and unionrepo to intercept the
725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
730 # manifest creation.
726 # manifest creation.
731 return manifest.manifestrevlog(self.svfs)
727 return manifest.manifestrevlog(self.svfs)
732
728
733 @storecache('00manifest.i')
729 @storecache('00manifest.i')
734 def manifestlog(self):
730 def manifestlog(self):
735 return manifest.manifestlog(self.svfs, self)
731 return manifest.manifestlog(self.svfs, self)
736
732
737 @repofilecache('dirstate')
733 @repofilecache('dirstate')
738 def dirstate(self):
734 def dirstate(self):
739 sparsematchfn = lambda: sparse.matcher(self)
735 sparsematchfn = lambda: sparse.matcher(self)
740
736
741 return dirstate.dirstate(self.vfs, self.ui, self.root,
737 return dirstate.dirstate(self.vfs, self.ui, self.root,
742 self._dirstatevalidate, sparsematchfn)
738 self._dirstatevalidate, sparsematchfn)
743
739
744 def _dirstatevalidate(self, node):
740 def _dirstatevalidate(self, node):
745 try:
741 try:
746 self.changelog.rev(node)
742 self.changelog.rev(node)
747 return node
743 return node
748 except error.LookupError:
744 except error.LookupError:
749 if not self._dirstatevalidatewarned:
745 if not self._dirstatevalidatewarned:
750 self._dirstatevalidatewarned = True
746 self._dirstatevalidatewarned = True
751 self.ui.warn(_("warning: ignoring unknown"
747 self.ui.warn(_("warning: ignoring unknown"
752 " working parent %s!\n") % short(node))
748 " working parent %s!\n") % short(node))
753 return nullid
749 return nullid
754
750
755 @repofilecache(narrowspec.FILENAME)
751 @repofilecache(narrowspec.FILENAME)
756 def narrowpats(self):
752 def narrowpats(self):
757 """matcher patterns for this repository's narrowspec
753 """matcher patterns for this repository's narrowspec
758
754
759 A tuple of (includes, excludes).
755 A tuple of (includes, excludes).
760 """
756 """
761 source = self
757 source = self
762 if self.shared():
758 if self.shared():
763 from . import hg
759 from . import hg
764 source = hg.sharedreposource(self)
760 source = hg.sharedreposource(self)
765 return narrowspec.load(source)
761 return narrowspec.load(source)
766
762
767 @repofilecache(narrowspec.FILENAME)
763 @repofilecache(narrowspec.FILENAME)
768 def _narrowmatch(self):
764 def _narrowmatch(self):
769 if changegroup.NARROW_REQUIREMENT not in self.requirements:
765 if changegroup.NARROW_REQUIREMENT not in self.requirements:
770 return matchmod.always(self.root, '')
766 return matchmod.always(self.root, '')
771 include, exclude = self.narrowpats
767 include, exclude = self.narrowpats
772 return narrowspec.match(self.root, include=include, exclude=exclude)
768 return narrowspec.match(self.root, include=include, exclude=exclude)
773
769
774 # TODO(martinvonz): make this property-like instead?
770 # TODO(martinvonz): make this property-like instead?
775 def narrowmatch(self):
771 def narrowmatch(self):
776 return self._narrowmatch
772 return self._narrowmatch
777
773
778 def setnarrowpats(self, newincludes, newexcludes):
774 def setnarrowpats(self, newincludes, newexcludes):
779 target = self
775 target = self
780 if self.shared():
776 if self.shared():
781 from . import hg
777 from . import hg
782 target = hg.sharedreposource(self)
778 target = hg.sharedreposource(self)
783 narrowspec.save(target, newincludes, newexcludes)
779 narrowspec.save(target, newincludes, newexcludes)
784 self.invalidate(clearfilecache=True)
780 self.invalidate(clearfilecache=True)
785
781
786 def __getitem__(self, changeid):
782 def __getitem__(self, changeid):
787 if changeid is None:
783 if changeid is None:
788 return context.workingctx(self)
784 return context.workingctx(self)
789 if isinstance(changeid, context.basectx):
785 if isinstance(changeid, context.basectx):
790 return changeid
786 return changeid
791 if isinstance(changeid, slice):
787 if isinstance(changeid, slice):
792 # wdirrev isn't contiguous so the slice shouldn't include it
788 # wdirrev isn't contiguous so the slice shouldn't include it
793 return [context.changectx(self, i)
789 return [context.changectx(self, i)
794 for i in xrange(*changeid.indices(len(self)))
790 for i in xrange(*changeid.indices(len(self)))
795 if i not in self.changelog.filteredrevs]
791 if i not in self.changelog.filteredrevs]
796 try:
792 try:
797 return context.changectx(self, changeid)
793 return context.changectx(self, changeid)
798 except error.WdirUnsupported:
794 except error.WdirUnsupported:
799 return context.workingctx(self)
795 return context.workingctx(self)
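# Illustrative usage sketch (not part of the original source; the exact
# symbols accepted depend on context.changectx in this version):
#
#   repo[None]      # working directory context (workingctx)
#   repo['tip']     # changectx for a symbol, node, or revision number
#   repo[0:3]       # list of changectx, skipping filtered revisions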
800
796
801 def __contains__(self, changeid):
797 def __contains__(self, changeid):
802 """True if the given changeid exists
798 """True if the given changeid exists
803
799
804 error.LookupError is raised if an ambiguous node is specified.
800 error.LookupError is raised if an ambiguous node is specified.
805 """
801 """
806 try:
802 try:
807 self[changeid]
803 self[changeid]
808 return True
804 return True
809 except error.RepoLookupError:
805 except error.RepoLookupError:
810 return False
806 return False
811
807
812 def __nonzero__(self):
808 def __nonzero__(self):
813 return True
809 return True
814
810
815 __bool__ = __nonzero__
811 __bool__ = __nonzero__
816
812
817 def __len__(self):
813 def __len__(self):
818 # no need to pay the cost of repoview.changelog
814 # no need to pay the cost of repoview.changelog
819 unfi = self.unfiltered()
815 unfi = self.unfiltered()
820 return len(unfi.changelog)
816 return len(unfi.changelog)
821
817
822 def __iter__(self):
818 def __iter__(self):
823 return iter(self.changelog)
819 return iter(self.changelog)
824
820
825 def revs(self, expr, *args):
821 def revs(self, expr, *args):
826 '''Find revisions matching a revset.
822 '''Find revisions matching a revset.
827
823
828 The revset is specified as a string ``expr`` that may contain
824 The revset is specified as a string ``expr`` that may contain
829 %-formatting to escape certain types. See ``revsetlang.formatspec``.
825 %-formatting to escape certain types. See ``revsetlang.formatspec``.
830
826
831 Revset aliases from the configuration are not expanded. To expand
827 Revset aliases from the configuration are not expanded. To expand
832 user aliases, consider calling ``scmutil.revrange()`` or
828 user aliases, consider calling ``scmutil.revrange()`` or
833 ``repo.anyrevs([expr], user=True)``.
829 ``repo.anyrevs([expr], user=True)``.
834
830
835 Returns a revset.abstractsmartset, which is a list-like interface
831 Returns a revset.abstractsmartset, which is a list-like interface
836 that contains integer revisions.
832 that contains integer revisions.
837 '''
833 '''
838 expr = revsetlang.formatspec(expr, *args)
834 expr = revsetlang.formatspec(expr, *args)
839 m = revset.match(None, expr)
835 m = revset.match(None, expr)
840 return m(self)
836 return m(self)
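# A minimal usage sketch (hypothetical revisions and keyword), assuming the
# standard formatspec escapes such as %ld (list of ints) and %s (string):
#
#   for rev in repo.revs('%ld and keyword(%s)', [1, 2, 3], 'bugfix'):
#       ...
#
# revsetlang.formatspec() performs the quoting, so callers should not
# interpolate user input into ``expr`` themselves.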
841
837
842 def set(self, expr, *args):
838 def set(self, expr, *args):
843 '''Find revisions matching a revset and emit changectx instances.
839 '''Find revisions matching a revset and emit changectx instances.
844
840
845 This is a convenience wrapper around ``revs()`` that iterates the
841 This is a convenience wrapper around ``revs()`` that iterates the
846 result and is a generator of changectx instances.
842 result and is a generator of changectx instances.
847
843
848 Revset aliases from the configuration are not expanded. To expand
844 Revset aliases from the configuration are not expanded. To expand
849 user aliases, consider calling ``scmutil.revrange()``.
845 user aliases, consider calling ``scmutil.revrange()``.
850 '''
846 '''
851 for r in self.revs(expr, *args):
847 for r in self.revs(expr, *args):
852 yield self[r]
848 yield self[r]
853
849
854 def anyrevs(self, specs, user=False, localalias=None):
850 def anyrevs(self, specs, user=False, localalias=None):
855 '''Find revisions matching one of the given revsets.
851 '''Find revisions matching one of the given revsets.
856
852
857 Revset aliases from the configuration are not expanded by default. To
853 Revset aliases from the configuration are not expanded by default. To
858 expand user aliases, specify ``user=True``. To provide some local
854 expand user aliases, specify ``user=True``. To provide some local
859 definitions overriding user aliases, set ``localalias`` to
855 definitions overriding user aliases, set ``localalias`` to
860 ``{name: definitionstring}``.
856 ``{name: definitionstring}``.
861 '''
857 '''
862 if user:
858 if user:
863 m = revset.matchany(self.ui, specs, repo=self,
859 m = revset.matchany(self.ui, specs, repo=self,
864 localalias=localalias)
860 localalias=localalias)
865 else:
861 else:
866 m = revset.matchany(None, specs, localalias=localalias)
862 m = revset.matchany(None, specs, localalias=localalias)
867 return m(self)
863 return m(self)
868
864
869 def url(self):
865 def url(self):
870 return 'file:' + self.root
866 return 'file:' + self.root
871
867
872 def hook(self, name, throw=False, **args):
868 def hook(self, name, throw=False, **args):
873 """Call a hook, passing this repo instance.
869 """Call a hook, passing this repo instance.
874
870
875 This is a convenience method to aid invoking hooks. Extensions likely
871 This is a convenience method to aid invoking hooks. Extensions likely
876 won't call this unless they have registered a custom hook or are
872 won't call this unless they have registered a custom hook or are
877 replacing code that is expected to call a hook.
873 replacing code that is expected to call a hook.
878 """
874 """
879 return hook.hook(self.ui, self, name, throw, **args)
875 return hook.hook(self.ui, self, name, throw, **args)
880
876
881 @filteredpropertycache
877 @filteredpropertycache
882 def _tagscache(self):
878 def _tagscache(self):
883 '''Returns a tagscache object that contains various tags related
879 '''Returns a tagscache object that contains various tags related
884 caches.'''
880 caches.'''
885
881
886 # This simplifies its cache management by having one decorated
882 # This simplifies its cache management by having one decorated
887 # function (this one) and the rest simply fetch things from it.
883 # function (this one) and the rest simply fetch things from it.
888 class tagscache(object):
884 class tagscache(object):
889 def __init__(self):
885 def __init__(self):
890 # These two define the set of tags for this repository. tags
886 # These two define the set of tags for this repository. tags
891 # maps tag name to node; tagtypes maps tag name to 'global' or
887 # maps tag name to node; tagtypes maps tag name to 'global' or
892 # 'local'. (Global tags are defined by .hgtags across all
888 # 'local'. (Global tags are defined by .hgtags across all
893 # heads, and local tags are defined in .hg/localtags.)
889 # heads, and local tags are defined in .hg/localtags.)
894 # They constitute the in-memory cache of tags.
890 # They constitute the in-memory cache of tags.
895 self.tags = self.tagtypes = None
891 self.tags = self.tagtypes = None
896
892
897 self.nodetagscache = self.tagslist = None
893 self.nodetagscache = self.tagslist = None
898
894
899 cache = tagscache()
895 cache = tagscache()
900 cache.tags, cache.tagtypes = self._findtags()
896 cache.tags, cache.tagtypes = self._findtags()
901
897
902 return cache
898 return cache
903
899
904 def tags(self):
900 def tags(self):
905 '''return a mapping of tag to node'''
901 '''return a mapping of tag to node'''
906 t = {}
902 t = {}
907 if self.changelog.filteredrevs:
903 if self.changelog.filteredrevs:
908 tags, tt = self._findtags()
904 tags, tt = self._findtags()
909 else:
905 else:
910 tags = self._tagscache.tags
906 tags = self._tagscache.tags
911 for k, v in tags.iteritems():
907 for k, v in tags.iteritems():
912 try:
908 try:
913 # ignore tags to unknown nodes
909 # ignore tags to unknown nodes
914 self.changelog.rev(v)
910 self.changelog.rev(v)
915 t[k] = v
911 t[k] = v
916 except (error.LookupError, ValueError):
912 except (error.LookupError, ValueError):
917 pass
913 pass
918 return t
914 return t
919
915
920 def _findtags(self):
916 def _findtags(self):
921 '''Do the hard work of finding tags. Return a pair of dicts
917 '''Do the hard work of finding tags. Return a pair of dicts
922 (tags, tagtypes) where tags maps tag name to node, and tagtypes
918 (tags, tagtypes) where tags maps tag name to node, and tagtypes
923 maps tag name to a string like \'global\' or \'local\'.
919 maps tag name to a string like \'global\' or \'local\'.
924 Subclasses or extensions are free to add their own tags, but
920 Subclasses or extensions are free to add their own tags, but
925 should be aware that the returned dicts will be retained for the
921 should be aware that the returned dicts will be retained for the
926 duration of the localrepo object.'''
922 duration of the localrepo object.'''
927
923
928 # XXX what tagtype should subclasses/extensions use? Currently
924 # XXX what tagtype should subclasses/extensions use? Currently
929 # mq and bookmarks add tags, but do not set the tagtype at all.
925 # mq and bookmarks add tags, but do not set the tagtype at all.
930 # Should each extension invent its own tag type? Should there
926 # Should each extension invent its own tag type? Should there
931 # be one tagtype for all such "virtual" tags? Or is the status
927 # be one tagtype for all such "virtual" tags? Or is the status
932 # quo fine?
928 # quo fine?
933
929
934
930
935 # map tag name to (node, hist)
931 # map tag name to (node, hist)
936 alltags = tagsmod.findglobaltags(self.ui, self)
932 alltags = tagsmod.findglobaltags(self.ui, self)
937 # map tag name to tag type
933 # map tag name to tag type
938 tagtypes = dict((tag, 'global') for tag in alltags)
934 tagtypes = dict((tag, 'global') for tag in alltags)
939
935
940 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
936 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
941
937
942 # Build the return dicts. Have to re-encode tag names because
938 # Build the return dicts. Have to re-encode tag names because
943 # the tags module always uses UTF-8 (in order not to lose info
939 # the tags module always uses UTF-8 (in order not to lose info
944 # writing to the cache), but the rest of Mercurial wants them in
940 # writing to the cache), but the rest of Mercurial wants them in
945 # local encoding.
941 # local encoding.
946 tags = {}
942 tags = {}
947 for (name, (node, hist)) in alltags.iteritems():
943 for (name, (node, hist)) in alltags.iteritems():
948 if node != nullid:
944 if node != nullid:
949 tags[encoding.tolocal(name)] = node
945 tags[encoding.tolocal(name)] = node
950 tags['tip'] = self.changelog.tip()
946 tags['tip'] = self.changelog.tip()
951 tagtypes = dict([(encoding.tolocal(name), value)
947 tagtypes = dict([(encoding.tolocal(name), value)
952 for (name, value) in tagtypes.iteritems()])
948 for (name, value) in tagtypes.iteritems()])
953 return (tags, tagtypes)
949 return (tags, tagtypes)
954
950
955 def tagtype(self, tagname):
951 def tagtype(self, tagname):
956 '''
952 '''
957 return the type of the given tag. result can be:
953 return the type of the given tag. result can be:
958
954
959 'local' : a local tag
955 'local' : a local tag
960 'global' : a global tag
956 'global' : a global tag
961 None : tag does not exist
957 None : tag does not exist
962 '''
958 '''
963
959
964 return self._tagscache.tagtypes.get(tagname)
960 return self._tagscache.tagtypes.get(tagname)
965
961
966 def tagslist(self):
962 def tagslist(self):
967 '''return a list of tags ordered by revision'''
963 '''return a list of tags ordered by revision'''
968 if not self._tagscache.tagslist:
964 if not self._tagscache.tagslist:
969 l = []
965 l = []
970 for t, n in self.tags().iteritems():
966 for t, n in self.tags().iteritems():
971 l.append((self.changelog.rev(n), t, n))
967 l.append((self.changelog.rev(n), t, n))
972 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
968 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
973
969
974 return self._tagscache.tagslist
970 return self._tagscache.tagslist
975
971
976 def nodetags(self, node):
972 def nodetags(self, node):
977 '''return the tags associated with a node'''
973 '''return the tags associated with a node'''
978 if not self._tagscache.nodetagscache:
974 if not self._tagscache.nodetagscache:
979 nodetagscache = {}
975 nodetagscache = {}
980 for t, n in self._tagscache.tags.iteritems():
976 for t, n in self._tagscache.tags.iteritems():
981 nodetagscache.setdefault(n, []).append(t)
977 nodetagscache.setdefault(n, []).append(t)
982 for tags in nodetagscache.itervalues():
978 for tags in nodetagscache.itervalues():
983 tags.sort()
979 tags.sort()
984 self._tagscache.nodetagscache = nodetagscache
980 self._tagscache.nodetagscache = nodetagscache
985 return self._tagscache.nodetagscache.get(node, [])
981 return self._tagscache.nodetagscache.get(node, [])
986
982
987 def nodebookmarks(self, node):
983 def nodebookmarks(self, node):
988 """return the list of bookmarks pointing to the specified node"""
984 """return the list of bookmarks pointing to the specified node"""
989 marks = []
985 marks = []
990 for bookmark, n in self._bookmarks.iteritems():
986 for bookmark, n in self._bookmarks.iteritems():
991 if n == node:
987 if n == node:
992 marks.append(bookmark)
988 marks.append(bookmark)
993 return sorted(marks)
989 return sorted(marks)
994
990
995 def branchmap(self):
991 def branchmap(self):
996 '''returns a dictionary {branch: [branchheads]} with branchheads
992 '''returns a dictionary {branch: [branchheads]} with branchheads
997 ordered by increasing revision number'''
993 ordered by increasing revision number'''
998 branchmap.updatecache(self)
994 branchmap.updatecache(self)
999 return self._branchcaches[self.filtername]
995 return self._branchcaches[self.filtername]
1000
996
1001 @unfilteredmethod
997 @unfilteredmethod
1002 def revbranchcache(self):
998 def revbranchcache(self):
1003 if not self._revbranchcache:
999 if not self._revbranchcache:
1004 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1000 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1005 return self._revbranchcache
1001 return self._revbranchcache
1006
1002
1007 def branchtip(self, branch, ignoremissing=False):
1003 def branchtip(self, branch, ignoremissing=False):
1008 '''return the tip node for a given branch
1004 '''return the tip node for a given branch
1009
1005
1010 If ignoremissing is True, then this method will not raise an error.
1006 If ignoremissing is True, then this method will not raise an error.
1011 This is helpful for callers that only expect None for a missing branch
1007 This is helpful for callers that only expect None for a missing branch
1012 (e.g. namespace).
1008 (e.g. namespace).
1013
1009
1014 '''
1010 '''
1015 try:
1011 try:
1016 return self.branchmap().branchtip(branch)
1012 return self.branchmap().branchtip(branch)
1017 except KeyError:
1013 except KeyError:
1018 if not ignoremissing:
1014 if not ignoremissing:
1019 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1015 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1020 else:
1016 else:
1021 pass
1017 pass
1022
1018
1023 def lookup(self, key):
1019 def lookup(self, key):
1024 return scmutil.revsymbol(self, key).node()
1020 return scmutil.revsymbol(self, key).node()
1025
1021
1026 def lookupbranch(self, key, remote=None):
1022 def lookupbranch(self, key, remote=None):
1027 repo = remote or self
1023 repo = remote or self
1028 if key in repo.branchmap():
1024 if key in repo.branchmap():
1029 return key
1025 return key
1030
1026
1031 repo = (remote and remote.local()) and remote or self
1027 repo = (remote and remote.local()) and remote or self
1032 return repo[key].branch()
1028 return repo[key].branch()
1033
1029
1034 def known(self, nodes):
1030 def known(self, nodes):
1035 cl = self.changelog
1031 cl = self.changelog
1036 nm = cl.nodemap
1032 nm = cl.nodemap
1037 filtered = cl.filteredrevs
1033 filtered = cl.filteredrevs
1038 result = []
1034 result = []
1039 for n in nodes:
1035 for n in nodes:
1040 r = nm.get(n)
1036 r = nm.get(n)
1041 resp = not (r is None or r in filtered)
1037 resp = not (r is None or r in filtered)
1042 result.append(resp)
1038 result.append(resp)
1043 return result
1039 return result
1044
1040
1045 def local(self):
1041 def local(self):
1046 return self
1042 return self
1047
1043
1048 def publishing(self):
1044 def publishing(self):
1049 # it's safe (and desirable) to trust the publish flag unconditionally
1045 # it's safe (and desirable) to trust the publish flag unconditionally
1050 # so that we don't finalize changes shared between users via ssh or nfs
1046 # so that we don't finalize changes shared between users via ssh or nfs
1051 return self.ui.configbool('phases', 'publish', untrusted=True)
1047 return self.ui.configbool('phases', 'publish', untrusted=True)
1052
1048
1053 def cancopy(self):
1049 def cancopy(self):
1054 # so statichttprepo's override of local() works
1050 # so statichttprepo's override of local() works
1055 if not self.local():
1051 if not self.local():
1056 return False
1052 return False
1057 if not self.publishing():
1053 if not self.publishing():
1058 return True
1054 return True
1059 # if publishing we can't copy if there is filtered content
1055 # if publishing we can't copy if there is filtered content
1060 return not self.filtered('visible').changelog.filteredrevs
1056 return not self.filtered('visible').changelog.filteredrevs
1061
1057
1062 def shared(self):
1058 def shared(self):
1063 '''the type of shared repository (None if not shared)'''
1059 '''the type of shared repository (None if not shared)'''
1064 if self.sharedpath != self.path:
1060 if self.sharedpath != self.path:
1065 return 'store'
1061 return 'store'
1066 return None
1062 return None
1067
1063
1068 def wjoin(self, f, *insidef):
1064 def wjoin(self, f, *insidef):
1069 return self.vfs.reljoin(self.root, f, *insidef)
1065 return self.vfs.reljoin(self.root, f, *insidef)
1070
1066
1071 def file(self, f):
1067 def file(self, f):
1072 if f[0] == '/':
1068 if f[0] == '/':
1073 f = f[1:]
1069 f = f[1:]
1074 return filelog.filelog(self.svfs, f)
1070 return filelog.filelog(self.svfs, f)
1075
1071
1076 def setparents(self, p1, p2=nullid):
1072 def setparents(self, p1, p2=nullid):
1077 with self.dirstate.parentchange():
1073 with self.dirstate.parentchange():
1078 copies = self.dirstate.setparents(p1, p2)
1074 copies = self.dirstate.setparents(p1, p2)
1079 pctx = self[p1]
1075 pctx = self[p1]
1080 if copies:
1076 if copies:
1081 # Adjust copy records; the dirstate cannot do it itself since it
1077 # Adjust copy records; the dirstate cannot do it itself since it
1082 # requires access to the parents' manifests. Preserve them
1078 # requires access to the parents' manifests. Preserve them
1083 # only for entries added to the first parent.
1079 # only for entries added to the first parent.
1084 for f in copies:
1080 for f in copies:
1085 if f not in pctx and copies[f] in pctx:
1081 if f not in pctx and copies[f] in pctx:
1086 self.dirstate.copy(copies[f], f)
1082 self.dirstate.copy(copies[f], f)
1087 if p2 == nullid:
1083 if p2 == nullid:
1088 for f, s in sorted(self.dirstate.copies().items()):
1084 for f, s in sorted(self.dirstate.copies().items()):
1089 if f not in pctx and s not in pctx:
1085 if f not in pctx and s not in pctx:
1090 self.dirstate.copy(None, f)
1086 self.dirstate.copy(None, f)
1091
1087
1092 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1088 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1093 """changeid can be a changeset revision, node, or tag.
1089 """changeid can be a changeset revision, node, or tag.
1094 fileid can be a file revision or node."""
1090 fileid can be a file revision or node."""
1095 return context.filectx(self, path, changeid, fileid,
1091 return context.filectx(self, path, changeid, fileid,
1096 changectx=changectx)
1092 changectx=changectx)
1097
1093
1098 def getcwd(self):
1094 def getcwd(self):
1099 return self.dirstate.getcwd()
1095 return self.dirstate.getcwd()
1100
1096
1101 def pathto(self, f, cwd=None):
1097 def pathto(self, f, cwd=None):
1102 return self.dirstate.pathto(f, cwd)
1098 return self.dirstate.pathto(f, cwd)
1103
1099
1104 def _loadfilter(self, filter):
1100 def _loadfilter(self, filter):
1105 if filter not in self._filterpats:
1101 if filter not in self._filterpats:
1106 l = []
1102 l = []
1107 for pat, cmd in self.ui.configitems(filter):
1103 for pat, cmd in self.ui.configitems(filter):
1108 if cmd == '!':
1104 if cmd == '!':
1109 continue
1105 continue
1110 mf = matchmod.match(self.root, '', [pat])
1106 mf = matchmod.match(self.root, '', [pat])
1111 fn = None
1107 fn = None
1112 params = cmd
1108 params = cmd
1113 for name, filterfn in self._datafilters.iteritems():
1109 for name, filterfn in self._datafilters.iteritems():
1114 if cmd.startswith(name):
1110 if cmd.startswith(name):
1115 fn = filterfn
1111 fn = filterfn
1116 params = cmd[len(name):].lstrip()
1112 params = cmd[len(name):].lstrip()
1117 break
1113 break
1118 if not fn:
1114 if not fn:
1119 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1115 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1120 # Wrap old filters not supporting keyword arguments
1116 # Wrap old filters not supporting keyword arguments
1121 if not pycompat.getargspec(fn)[2]:
1117 if not pycompat.getargspec(fn)[2]:
1122 oldfn = fn
1118 oldfn = fn
1123 fn = lambda s, c, **kwargs: oldfn(s, c)
1119 fn = lambda s, c, **kwargs: oldfn(s, c)
1124 l.append((mf, fn, params))
1120 l.append((mf, fn, params))
1125 self._filterpats[filter] = l
1121 self._filterpats[filter] = l
1126 return self._filterpats[filter]
1122 return self._filterpats[filter]
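# Hedged sketch of the hgrc configuration these filters come from (the
# pattern and commands below are made up; "pipe:" is one supported prefix):
#
#   [encode]
#   **.txt = pipe: dos2unix
#
#   [decode]
#   **.txt = pipe: unix2dos
#
# A command of "!" disables a pattern, and a command starting with a name
# registered via adddatafilter() is dispatched to that Python filter instead
# of an external program.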
1127
1123
1128 def _filter(self, filterpats, filename, data):
1124 def _filter(self, filterpats, filename, data):
1129 for mf, fn, cmd in filterpats:
1125 for mf, fn, cmd in filterpats:
1130 if mf(filename):
1126 if mf(filename):
1131 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1127 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1132 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1128 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1133 break
1129 break
1134
1130
1135 return data
1131 return data
1136
1132
1137 @unfilteredpropertycache
1133 @unfilteredpropertycache
1138 def _encodefilterpats(self):
1134 def _encodefilterpats(self):
1139 return self._loadfilter('encode')
1135 return self._loadfilter('encode')
1140
1136
1141 @unfilteredpropertycache
1137 @unfilteredpropertycache
1142 def _decodefilterpats(self):
1138 def _decodefilterpats(self):
1143 return self._loadfilter('decode')
1139 return self._loadfilter('decode')
1144
1140
1145 def adddatafilter(self, name, filter):
1141 def adddatafilter(self, name, filter):
1146 self._datafilters[name] = filter
1142 self._datafilters[name] = filter
1147
1143
1148 def wread(self, filename):
1144 def wread(self, filename):
1149 if self.wvfs.islink(filename):
1145 if self.wvfs.islink(filename):
1150 data = self.wvfs.readlink(filename)
1146 data = self.wvfs.readlink(filename)
1151 else:
1147 else:
1152 data = self.wvfs.read(filename)
1148 data = self.wvfs.read(filename)
1153 return self._filter(self._encodefilterpats, filename, data)
1149 return self._filter(self._encodefilterpats, filename, data)
1154
1150
1155 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1151 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1156 """write ``data`` into ``filename`` in the working directory
1152 """write ``data`` into ``filename`` in the working directory
1157
1153
1158 This returns the length of the written (possibly decoded) data.
1154 This returns the length of the written (possibly decoded) data.
1159 """
1155 """
1160 data = self._filter(self._decodefilterpats, filename, data)
1156 data = self._filter(self._decodefilterpats, filename, data)
1161 if 'l' in flags:
1157 if 'l' in flags:
1162 self.wvfs.symlink(data, filename)
1158 self.wvfs.symlink(data, filename)
1163 else:
1159 else:
1164 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1160 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1165 **kwargs)
1161 **kwargs)
1166 if 'x' in flags:
1162 if 'x' in flags:
1167 self.wvfs.setflags(filename, False, True)
1163 self.wvfs.setflags(filename, False, True)
1168 else:
1164 else:
1169 self.wvfs.setflags(filename, False, False)
1165 self.wvfs.setflags(filename, False, False)
1170 return len(data)
1166 return len(data)
1171
1167
1172 def wwritedata(self, filename, data):
1168 def wwritedata(self, filename, data):
1173 return self._filter(self._decodefilterpats, filename, data)
1169 return self._filter(self._decodefilterpats, filename, data)
1174
1170
1175 def currenttransaction(self):
1171 def currenttransaction(self):
1176 """return the current transaction or None if non exists"""
1172 """return the current transaction or None if non exists"""
1177 if self._transref:
1173 if self._transref:
1178 tr = self._transref()
1174 tr = self._transref()
1179 else:
1175 else:
1180 tr = None
1176 tr = None
1181
1177
1182 if tr and tr.running():
1178 if tr and tr.running():
1183 return tr
1179 return tr
1184 return None
1180 return None
1185
1181
1186 def transaction(self, desc, report=None):
1182 def transaction(self, desc, report=None):
1187 if (self.ui.configbool('devel', 'all-warnings')
1183 if (self.ui.configbool('devel', 'all-warnings')
1188 or self.ui.configbool('devel', 'check-locks')):
1184 or self.ui.configbool('devel', 'check-locks')):
1189 if self._currentlock(self._lockref) is None:
1185 if self._currentlock(self._lockref) is None:
1190 raise error.ProgrammingError('transaction requires locking')
1186 raise error.ProgrammingError('transaction requires locking')
1191 tr = self.currenttransaction()
1187 tr = self.currenttransaction()
1192 if tr is not None:
1188 if tr is not None:
1193 return tr.nest(name=desc)
1189 return tr.nest(name=desc)
1194
1190
1195 # abort here if the journal already exists
1191 # abort here if the journal already exists
1196 if self.svfs.exists("journal"):
1192 if self.svfs.exists("journal"):
1197 raise error.RepoError(
1193 raise error.RepoError(
1198 _("abandoned transaction found"),
1194 _("abandoned transaction found"),
1199 hint=_("run 'hg recover' to clean up transaction"))
1195 hint=_("run 'hg recover' to clean up transaction"))
1200
1196
1201 idbase = "%.40f#%f" % (random.random(), time.time())
1197 idbase = "%.40f#%f" % (random.random(), time.time())
1202 ha = hex(hashlib.sha1(idbase).digest())
1198 ha = hex(hashlib.sha1(idbase).digest())
1203 txnid = 'TXN:' + ha
1199 txnid = 'TXN:' + ha
1204 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1200 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1205
1201
1206 self._writejournal(desc)
1202 self._writejournal(desc)
1207 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1203 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1208 if report:
1204 if report:
1209 rp = report
1205 rp = report
1210 else:
1206 else:
1211 rp = self.ui.warn
1207 rp = self.ui.warn
1212 vfsmap = {'plain': self.vfs} # root of .hg/
1208 vfsmap = {'plain': self.vfs} # root of .hg/
1213 # we must avoid cyclic reference between repo and transaction.
1209 # we must avoid cyclic reference between repo and transaction.
1214 reporef = weakref.ref(self)
1210 reporef = weakref.ref(self)
1215 # Code to track tag movement
1211 # Code to track tag movement
1216 #
1212 #
1217 # Since tags are all handled as file content, it is actually quite hard
1213 # Since tags are all handled as file content, it is actually quite hard
1218 # to track these movements from a code perspective. So we fall back to
1214 # to track these movements from a code perspective. So we fall back to
1219 # tracking at the repository level. One could envision tracking changes
1215 # tracking at the repository level. One could envision tracking changes
1220 # to the '.hgtags' file through changegroup apply, but that fails to
1216 # to the '.hgtags' file through changegroup apply, but that fails to
1221 # cope with cases where a transaction exposes new heads without a
1217 # cope with cases where a transaction exposes new heads without a
1222 # changegroup being involved (e.g. phase movement).
1218 # changegroup being involved (e.g. phase movement).
1223 #
1219 #
1224 # For now, we gate the feature behind a flag since this likely comes
1220 # For now, we gate the feature behind a flag since this likely comes
1225 # with performance impacts. The current code runs more often than needed
1221 # with performance impacts. The current code runs more often than needed
1226 # and does not use caches as much as it could. The current focus is on
1222 # and does not use caches as much as it could. The current focus is on
1227 # the behavior of the feature so we disable it by default. The flag
1223 # the behavior of the feature so we disable it by default. The flag
1228 # will be removed when we are happy with the performance impact.
1224 # will be removed when we are happy with the performance impact.
1229 #
1225 #
1230 # Once this feature is no longer experimental move the following
1226 # Once this feature is no longer experimental move the following
1231 # documentation to the appropriate help section:
1227 # documentation to the appropriate help section:
1232 #
1228 #
1233 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1229 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1234 # tags (new or changed or deleted tags). In addition the details of
1230 # tags (new or changed or deleted tags). In addition the details of
1235 # these changes are made available in a file at:
1231 # these changes are made available in a file at:
1236 # ``REPOROOT/.hg/changes/tags.changes``.
1232 # ``REPOROOT/.hg/changes/tags.changes``.
1237 # Make sure you check for HG_TAG_MOVED before reading that file as it
1233 # Make sure you check for HG_TAG_MOVED before reading that file as it
1238 # might exist from a previous transaction even if no tags were touched
1234 # might exist from a previous transaction even if no tags were touched
1239 # in this one. Changes are recorded in a line-based format::
1235 # in this one. Changes are recorded in a line-based format::
1240 #
1236 #
1241 # <action> <hex-node> <tag-name>\n
1237 # <action> <hex-node> <tag-name>\n
1242 #
1238 #
1243 # Actions are defined as follows:
1239 # Actions are defined as follows:
1244 # "-R": tag is removed,
1240 # "-R": tag is removed,
1245 # "+A": tag is added,
1241 # "+A": tag is added,
1246 # "-M": tag is moved (old value),
1242 # "-M": tag is moved (old value),
1247 # "+M": tag is moved (new value),
1243 # "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks).
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
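        # Illustration only (not part of this module): the pretxnclose* hooks
        # fired above are configured in a repository's hgrc; the hook names
        # after the dot are hypothetical:
        #
        #   [hooks]
        #   pretxnclose.check = python:myhooks.checktxn
        #   pretxnclose-bookmark.audit = echo "bookmark $HG_BOOKMARK moved"
        #
        # Because throw=True, a failing hook aborts the transaction before it
        # is closed.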
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) isn't invoked
                # while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback(),
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
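        # Worked example (comment only, not part of the original method):
        # "journal.desc" stores the pre-transaction changelog length and the
        # transaction description on two lines, so after 42 revisions a
        # "commit" transaction writes the bytes "42\ncommit\n"; _rollback()
        # later parses the renamed "undo.desc" with read().splitlines().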

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
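        # Usage note (illustrative, not from the original source): this method
        # backs the "hg recover" command, e.g.:
        #
        #   $ hg recover
        #   rolling back interrupted transaction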

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction was closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()
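        # Usage sketch (illustrative, not part of the original source): a full
        # warm-up can be requested without a transaction, for instance from an
        # extension:
        #
        #   repo.updatecaches(full=True)  # also rebuilds the rev-branch cache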

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
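        # Illustrative sketch (not part of the original source) of the lock
        # ordering the docstring requires: take 'wlock' before 'lock' and hold
        # both around a transaction, e.g.:
        #
        #   with repo.wlock():
        #       with repo.lock():
        #           with repo.transaction('my-change') as tr:
        #               pass  # mutate store and working-copy state here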

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            #    0 --- 1 --- 3   rev1 changes file foo
            #      \       /     rev2 renames foo to bar and changes it
            #       \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            #    0 --- 1 --- 3   rev4 reverts the content change from rev2
            #      \       /     merging rev3 and rev4 should use bar@rev2
            #       \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

2061 @unfilteredmethod
2057 @unfilteredmethod
2062 def destroying(self):
2058 def destroying(self):
2063 '''Inform the repository that nodes are about to be destroyed.
2059 '''Inform the repository that nodes are about to be destroyed.
2064 Intended for use by strip and rollback, so there's a common
2060 Intended for use by strip and rollback, so there's a common
2065 place for anything that has to be done before destroying history.
2061 place for anything that has to be done before destroying history.
2066
2062
2067 This is mostly useful for saving state that is in memory and waiting
2063 This is mostly useful for saving state that is in memory and waiting
2068 to be flushed when the current lock is released. Because a call to
2064 to be flushed when the current lock is released. Because a call to
2069 destroyed is imminent, the repo will be invalidated, causing those
2065 destroyed is imminent, the repo will be invalidated, causing those
2070 changes to either stay in memory (waiting for the next unlock) or vanish
2066 changes to either stay in memory (waiting for the next unlock) or vanish
2071 completely.
2067 completely.
2072 '''
2068 '''
2073 # When using the same lock to commit and strip, the phasecache is left
2069 # When using the same lock to commit and strip, the phasecache is left
2074 # dirty after committing. Then when we strip, the repo is invalidated,
2070 # dirty after committing. Then when we strip, the repo is invalidated,
2075 # causing those changes to disappear.
2071 # causing those changes to disappear.
2076 if '_phasecache' in vars(self):
2072 if '_phasecache' in vars(self):
2077 self._phasecache.write()
2073 self._phasecache.write()
2078
2074
2079 @unfilteredmethod
2075 @unfilteredmethod
2080 def destroyed(self):
2076 def destroyed(self):
2081 '''Inform the repository that nodes have been destroyed.
2077 '''Inform the repository that nodes have been destroyed.
2082 Intended for use by strip and rollback, so there's a common
2078 Intended for use by strip and rollback, so there's a common
2083 place for anything that has to be done after destroying history.
2079 place for anything that has to be done after destroying history.
2084 '''
2080 '''
2085 # When one tries to:
2081 # When one tries to:
2086 # 1) destroy nodes thus calling this method (e.g. strip)
2082 # 1) destroy nodes thus calling this method (e.g. strip)
2087 # 2) use phasecache somewhere (e.g. commit)
2083 # 2) use phasecache somewhere (e.g. commit)
2088 #
2084 #
2089 # then 2) will fail because the phasecache contains nodes that were
2085 # then 2) will fail because the phasecache contains nodes that were
2090 # removed. We can either remove phasecache from the filecache,
2086 # removed. We can either remove phasecache from the filecache,
2091 # causing it to reload next time it is accessed, or simply filter
2087 # causing it to reload next time it is accessed, or simply filter
2092 # the removed nodes now and write the updated cache.
2088 # the removed nodes now and write the updated cache.
2093 self._phasecache.filterunknown(self)
2089 self._phasecache.filterunknown(self)
2094 self._phasecache.write()
2090 self._phasecache.write()
2095
2091
2096 # refresh all repository caches
2092 # refresh all repository caches
2097 self.updatecaches()
2093 self.updatecaches()
2098
2094
2099 # Ensure the persistent tag cache is updated. Doing it now
2095 # Ensure the persistent tag cache is updated. Doing it now
2100 # means that the tag cache only has to worry about destroyed
2096 # means that the tag cache only has to worry about destroyed
2101 # heads immediately after a strip/rollback. That in turn
2097 # heads immediately after a strip/rollback. That in turn
2102 # guarantees that "cachetip == currenttip" (comparing both rev
2098 # guarantees that "cachetip == currenttip" (comparing both rev
2103 # and node) always means no nodes have been added or destroyed.
2099 # and node) always means no nodes have been added or destroyed.
2104
2100
2105 # XXX this is suboptimal when qrefresh'ing: we strip the current
2101 # XXX this is suboptimal when qrefresh'ing: we strip the current
2106 # head, refresh the tag cache, then immediately add a new head.
2102 # head, refresh the tag cache, then immediately add a new head.
2107 # But I think doing it this way is necessary for the "instant
2103 # But I think doing it this way is necessary for the "instant
2108 # tag cache retrieval" case to work.
2104 # tag cache retrieval" case to work.
2109 self.invalidate()
2105 self.invalidate()
2110
2106
2111 def status(self, node1='.', node2=None, match=None,
2107 def status(self, node1='.', node2=None, match=None,
2112 ignored=False, clean=False, unknown=False,
2108 ignored=False, clean=False, unknown=False,
2113 listsubrepos=False):
2109 listsubrepos=False):
2114 '''a convenience method that calls node1.status(node2)'''
2110 '''a convenience method that calls node1.status(node2)'''
2115 return self[node1].status(node2, match, ignored, clean, unknown,
2111 return self[node1].status(node2, match, ignored, clean, unknown,
2116 listsubrepos)
2112 listsubrepos)
2117
2113
2118 def addpostdsstatus(self, ps):
2114 def addpostdsstatus(self, ps):
2119 """Add a callback to run within the wlock, at the point at which status
2115 """Add a callback to run within the wlock, at the point at which status
2120 fixups happen.
2116 fixups happen.
2121
2117
2122 On status completion, callback(wctx, status) will be called with the
2118 On status completion, callback(wctx, status) will be called with the
2123 wlock held, unless the dirstate has changed from underneath or the wlock
2119 wlock held, unless the dirstate has changed from underneath or the wlock
2124 couldn't be grabbed.
2120 couldn't be grabbed.
2125
2121
2126 Callbacks should not capture and use a cached copy of the dirstate --
2122 Callbacks should not capture and use a cached copy of the dirstate --
2127 it might change in the meanwhile. Instead, they should access the
2123 it might change in the meanwhile. Instead, they should access the
2128 dirstate via wctx.repo().dirstate.
2124 dirstate via wctx.repo().dirstate.
2129
2125
2130 This list is emptied out after each status run -- extensions should
2126 This list is emptied out after each status run -- extensions should
2131 make sure they add to this list each time dirstate.status is called.
2127 make sure they add to this list each time dirstate.status is called.
2132 Extensions should also make sure they don't call this for statuses
2128 Extensions should also make sure they don't call this for statuses
2133 that don't involve the dirstate.
2129 that don't involve the dirstate.
2134 """
2130 """
2135
2131
2136 # The list is located here for uniqueness reasons -- it is actually
2132 # The list is located here for uniqueness reasons -- it is actually
2137 # managed by the workingctx, but that isn't unique per-repo.
2133 # managed by the workingctx, but that isn't unique per-repo.
2138 self._postdsstatus.append(ps)
2134 self._postdsstatus.append(ps)
2139
2135
2140 def postdsstatus(self):
2136 def postdsstatus(self):
2141 """Used by workingctx to get the list of post-dirstate-status hooks."""
2137 """Used by workingctx to get the list of post-dirstate-status hooks."""
2142 return self._postdsstatus
2138 return self._postdsstatus
2143
2139
2144 def clearpostdsstatus(self):
2140 def clearpostdsstatus(self):
2145 """Used by workingctx to clear post-dirstate-status hooks."""
2141 """Used by workingctx to clear post-dirstate-status hooks."""
2146 del self._postdsstatus[:]
2142 del self._postdsstatus[:]
2147
2143
2148 def heads(self, start=None):
2144 def heads(self, start=None):
2149 if start is None:
2145 if start is None:
2150 cl = self.changelog
2146 cl = self.changelog
2151 headrevs = reversed(cl.headrevs())
2147 headrevs = reversed(cl.headrevs())
2152 return [cl.node(rev) for rev in headrevs]
2148 return [cl.node(rev) for rev in headrevs]
2153
2149
2154 heads = self.changelog.heads(start)
2150 heads = self.changelog.heads(start)
2155 # sort the output in rev descending order
2151 # sort the output in rev descending order
2156 return sorted(heads, key=self.changelog.rev, reverse=True)
2152 return sorted(heads, key=self.changelog.rev, reverse=True)
2157
2153
2158 def branchheads(self, branch=None, start=None, closed=False):
2154 def branchheads(self, branch=None, start=None, closed=False):
2159 '''return a (possibly filtered) list of heads for the given branch
2155 '''return a (possibly filtered) list of heads for the given branch
2160
2156
2161 Heads are returned in topological order, from newest to oldest.
2157 Heads are returned in topological order, from newest to oldest.
2162 If branch is None, use the dirstate branch.
2158 If branch is None, use the dirstate branch.
2163 If start is not None, return only heads reachable from start.
2159 If start is not None, return only heads reachable from start.
2164 If closed is True, return heads that are marked as closed as well.
2160 If closed is True, return heads that are marked as closed as well.
2165 '''
2161 '''
2166 if branch is None:
2162 if branch is None:
2167 branch = self[None].branch()
2163 branch = self[None].branch()
2168 branches = self.branchmap()
2164 branches = self.branchmap()
2169 if branch not in branches:
2165 if branch not in branches:
2170 return []
2166 return []
2171 # the cache returns heads ordered lowest to highest
2167 # the cache returns heads ordered lowest to highest
2172 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2168 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2173 if start is not None:
2169 if start is not None:
2174 # filter out the heads that cannot be reached from startrev
2170 # filter out the heads that cannot be reached from startrev
2175 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2171 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2176 bheads = [h for h in bheads if h in fbheads]
2172 bheads = [h for h in bheads if h in fbheads]
2177 return bheads
2173 return bheads
2178
2174
2179 def branches(self, nodes):
2175 def branches(self, nodes):
2180 if not nodes:
2176 if not nodes:
2181 nodes = [self.changelog.tip()]
2177 nodes = [self.changelog.tip()]
2182 b = []
2178 b = []
2183 for n in nodes:
2179 for n in nodes:
2184 t = n
2180 t = n
2185 while True:
2181 while True:
2186 p = self.changelog.parents(n)
2182 p = self.changelog.parents(n)
2187 if p[1] != nullid or p[0] == nullid:
2183 if p[1] != nullid or p[0] == nullid:
2188 b.append((t, n, p[0], p[1]))
2184 b.append((t, n, p[0], p[1]))
2189 break
2185 break
2190 n = p[0]
2186 n = p[0]
2191 return b
2187 return b
2192
2188
2193 def between(self, pairs):
2189 def between(self, pairs):
2194 r = []
2190 r = []
2195
2191
2196 for top, bottom in pairs:
2192 for top, bottom in pairs:
2197 n, l, i = top, [], 0
2193 n, l, i = top, [], 0
2198 f = 1
2194 f = 1
2199
2195
2200 while n != bottom and n != nullid:
2196 while n != bottom and n != nullid:
2201 p = self.changelog.parents(n)[0]
2197 p = self.changelog.parents(n)[0]
2202 if i == f:
2198 if i == f:
2203 l.append(n)
2199 l.append(n)
2204 f = f * 2
2200 f = f * 2
2205 n = p
2201 n = p
2206 i += 1
2202 i += 1
2207
2203
2208 r.append(l)
2204 r.append(l)
2209
2205
2210 return r
2206 return r
2211
2207
2212 def checkpush(self, pushop):
2208 def checkpush(self, pushop):
2213 """Extensions can override this function if additional checks have
2209 """Extensions can override this function if additional checks have
2214 to be performed before pushing, or call it if they override push
2210 to be performed before pushing, or call it if they override push
2215 command.
2211 command.
2216 """
2212 """
2217
2213
2218 @unfilteredpropertycache
2214 @unfilteredpropertycache
2219 def prepushoutgoinghooks(self):
2215 def prepushoutgoinghooks(self):
2220 """Return util.hooks consists of a pushop with repo, remote, outgoing
2216 """Return util.hooks consists of a pushop with repo, remote, outgoing
2221 methods, which are called before pushing changesets.
2217 methods, which are called before pushing changesets.
2222 """
2218 """
2223 return util.hooks()
2219 return util.hooks()
2224
2220
2225 def pushkey(self, namespace, key, old, new):
2221 def pushkey(self, namespace, key, old, new):
2226 try:
2222 try:
2227 tr = self.currenttransaction()
2223 tr = self.currenttransaction()
2228 hookargs = {}
2224 hookargs = {}
2229 if tr is not None:
2225 if tr is not None:
2230 hookargs.update(tr.hookargs)
2226 hookargs.update(tr.hookargs)
2231 hookargs = pycompat.strkwargs(hookargs)
2227 hookargs = pycompat.strkwargs(hookargs)
2232 hookargs[r'namespace'] = namespace
2228 hookargs[r'namespace'] = namespace
2233 hookargs[r'key'] = key
2229 hookargs[r'key'] = key
2234 hookargs[r'old'] = old
2230 hookargs[r'old'] = old
2235 hookargs[r'new'] = new
2231 hookargs[r'new'] = new
2236 self.hook('prepushkey', throw=True, **hookargs)
2232 self.hook('prepushkey', throw=True, **hookargs)
2237 except error.HookAbort as exc:
2233 except error.HookAbort as exc:
2238 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2234 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2239 if exc.hint:
2235 if exc.hint:
2240 self.ui.write_err(_("(%s)\n") % exc.hint)
2236 self.ui.write_err(_("(%s)\n") % exc.hint)
2241 return False
2237 return False
2242 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2238 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2243 ret = pushkey.push(self, namespace, key, old, new)
2239 ret = pushkey.push(self, namespace, key, old, new)
2244 def runhook():
2240 def runhook():
2245 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2241 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2246 ret=ret)
2242 ret=ret)
2247 self._afterlock(runhook)
2243 self._afterlock(runhook)
2248 return ret
2244 return ret
2249
2245
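The pushkey flow above (prepushkey hook, pushkey.push, then the deferred pushkey hook) is how bookmarks and phases are exchanged over the wire. A minimal, hedged usage sketch against an in-process repo; the repository path and bookmark name are assumptions, not part of this patch:

    from mercurial import hg, ui as uimod
    from mercurial.node import hex

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # assumed path
    node = repo[b'tip'].node()
    # In the bookmarks namespace an empty "old" value means "create the key".
    ok = repo.pushkey(b'bookmarks', b'mybook', b'', hex(node))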
2250 def listkeys(self, namespace):
2246 def listkeys(self, namespace):
2251 self.hook('prelistkeys', throw=True, namespace=namespace)
2247 self.hook('prelistkeys', throw=True, namespace=namespace)
2252 self.ui.debug('listing keys for "%s"\n' % namespace)
2248 self.ui.debug('listing keys for "%s"\n' % namespace)
2253 values = pushkey.list(self, namespace)
2249 values = pushkey.list(self, namespace)
2254 self.hook('listkeys', namespace=namespace, values=values)
2250 self.hook('listkeys', namespace=namespace, values=values)
2255 return values
2251 return values
2256
2252
2257 def debugwireargs(self, one, two, three=None, four=None, five=None):
2253 def debugwireargs(self, one, two, three=None, four=None, five=None):
2258 '''used to test argument passing over the wire'''
2254 '''used to test argument passing over the wire'''
2259 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2255 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2260 pycompat.bytestr(four),
2256 pycompat.bytestr(four),
2261 pycompat.bytestr(five))
2257 pycompat.bytestr(five))
2262
2258
2263 def savecommitmessage(self, text):
2259 def savecommitmessage(self, text):
2264 fp = self.vfs('last-message.txt', 'wb')
2260 fp = self.vfs('last-message.txt', 'wb')
2265 try:
2261 try:
2266 fp.write(text)
2262 fp.write(text)
2267 finally:
2263 finally:
2268 fp.close()
2264 fp.close()
2269 return self.pathto(fp.name[len(self.root) + 1:])
2265 return self.pathto(fp.name[len(self.root) + 1:])
2270
2266
2271 # used to avoid circular references so destructors work
2267 # used to avoid circular references so destructors work
2272 def aftertrans(files):
2268 def aftertrans(files):
2273 renamefiles = [tuple(t) for t in files]
2269 renamefiles = [tuple(t) for t in files]
2274 def a():
2270 def a():
2275 for vfs, src, dest in renamefiles:
2271 for vfs, src, dest in renamefiles:
2276 # if src and dest refer to the same file, vfs.rename is a no-op,
2272 # if src and dest refer to the same file, vfs.rename is a no-op,
2277 # leaving both src and dest on disk. delete dest to make sure
2273 # leaving both src and dest on disk. delete dest to make sure
2278 # the rename cannot be such a no-op.
2274 # the rename cannot be such a no-op.
2279 vfs.tryunlink(dest)
2275 vfs.tryunlink(dest)
2280 try:
2276 try:
2281 vfs.rename(src, dest)
2277 vfs.rename(src, dest)
2282 except OSError: # journal file does not yet exist
2278 except OSError: # journal file does not yet exist
2283 pass
2279 pass
2284 return a
2280 return a
2285
2281
2286 def undoname(fn):
2282 def undoname(fn):
2287 base, name = os.path.split(fn)
2283 base, name = os.path.split(fn)
2288 assert name.startswith('journal')
2284 assert name.startswith('journal')
2289 return os.path.join(base, name.replace('journal', 'undo', 1))
2285 return os.path.join(base, name.replace('journal', 'undo', 1))
2290
2286
2291 def instance(ui, path, create):
2287 def instance(ui, path, create):
2292 return localrepository(ui, util.urllocalpath(path), create)
2288 return localrepository(ui, util.urllocalpath(path), create)
2293
2289
2294 def islocal(path):
2290 def islocal(path):
2295 return True
2291 return True
2296
2292
2297 def newreporequirements(repo):
2293 def newreporequirements(repo):
2298 """Determine the set of requirements for a new local repository.
2294 """Determine the set of requirements for a new local repository.
2299
2295
2300 Extensions can wrap this function to specify custom requirements for
2296 Extensions can wrap this function to specify custom requirements for
2301 new repositories.
2297 new repositories.
2302 """
2298 """
2303 ui = repo.ui
2299 ui = repo.ui
2304 requirements = {'revlogv1'}
2300 requirements = {'revlogv1'}
2305 if ui.configbool('format', 'usestore'):
2301 if ui.configbool('format', 'usestore'):
2306 requirements.add('store')
2302 requirements.add('store')
2307 if ui.configbool('format', 'usefncache'):
2303 if ui.configbool('format', 'usefncache'):
2308 requirements.add('fncache')
2304 requirements.add('fncache')
2309 if ui.configbool('format', 'dotencode'):
2305 if ui.configbool('format', 'dotencode'):
2310 requirements.add('dotencode')
2306 requirements.add('dotencode')
2311
2307
2312 compengine = ui.config('experimental', 'format.compression')
2308 compengine = ui.config('experimental', 'format.compression')
2313 if compengine not in util.compengines:
2309 if compengine not in util.compengines:
2314 raise error.Abort(_('compression engine %s defined by '
2310 raise error.Abort(_('compression engine %s defined by '
2315 'experimental.format.compression not available') %
2311 'experimental.format.compression not available') %
2316 compengine,
2312 compengine,
2317 hint=_('run "hg debuginstall" to list available '
2313 hint=_('run "hg debuginstall" to list available '
2318 'compression engines'))
2314 'compression engines'))
2319
2315
2320 # zlib is the historical default and doesn't need an explicit requirement.
2316 # zlib is the historical default and doesn't need an explicit requirement.
2321 if compengine != 'zlib':
2317 if compengine != 'zlib':
2322 requirements.add('exp-compression-%s' % compengine)
2318 requirements.add('exp-compression-%s' % compengine)
2323
2319
2324 if scmutil.gdinitconfig(ui):
2320 if scmutil.gdinitconfig(ui):
2325 requirements.add('generaldelta')
2321 requirements.add('generaldelta')
2326 if ui.configbool('experimental', 'treemanifest'):
2322 if ui.configbool('experimental', 'treemanifest'):
2327 requirements.add('treemanifest')
2323 requirements.add('treemanifest')
2328
2324
2329 revlogv2 = ui.config('experimental', 'revlogv2')
2325 revlogv2 = ui.config('experimental', 'revlogv2')
2330 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2326 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2331 requirements.remove('revlogv1')
2327 requirements.remove('revlogv1')
2332 # generaldelta is implied by revlogv2.
2328 # generaldelta is implied by revlogv2.
2333 requirements.discard('generaldelta')
2329 requirements.discard('generaldelta')
2334 requirements.add(REVLOGV2_REQUIREMENT)
2330 requirements.add(REVLOGV2_REQUIREMENT)
2335
2331
2336 return requirements
2332 return requirements
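newreporequirements() is explicitly meant to be wrapped by extensions. A hedged sketch of the usual pattern using extensions.wrapfunction; the config knob and requirement name are invented for illustration:

    from mercurial import extensions, localrepo

    def _newreporequirements(orig, repo):
        # Run the stock implementation, then add a custom requirement so
        # clients without the feature refuse to touch the repository.
        requirements = orig(repo)
        if repo.ui.configbool(b'experimental', b'myfeature'):  # hypothetical
            requirements.add(b'exp-myfeature')                  # hypothetical
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)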
@@ -1,616 +1,612 b''
1 # sshpeer.py - ssh repository proxy class for mercurial
1 # sshpeer.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import uuid
11 import uuid
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 error,
15 error,
16 pycompat,
16 pycompat,
17 util,
17 util,
18 wireproto,
18 wireproto,
19 wireprotoserver,
19 wireprotoserver,
20 wireprototypes,
20 wireprototypes,
21 )
21 )
22 from .utils import (
22 from .utils import (
23 procutil,
23 procutil,
24 )
24 )
25
25
26 def _serverquote(s):
26 def _serverquote(s):
27 """quote a string for the remote shell ... which we assume is sh"""
27 """quote a string for the remote shell ... which we assume is sh"""
28 if not s:
28 if not s:
29 return s
29 return s
30 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
30 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
31 return s
31 return s
32 return "'%s'" % s.replace("'", "'\\''")
32 return "'%s'" % s.replace("'", "'\\''")
33
33
34 def _forwardoutput(ui, pipe):
34 def _forwardoutput(ui, pipe):
35 """display all data currently available on pipe as remote output.
35 """display all data currently available on pipe as remote output.
36
36
37 This is non-blocking."""
37 This is non-blocking."""
38 if pipe:
38 if pipe:
39 s = procutil.readpipe(pipe)
39 s = procutil.readpipe(pipe)
40 if s:
40 if s:
41 for l in s.splitlines():
41 for l in s.splitlines():
42 ui.status(_("remote: "), l, '\n')
42 ui.status(_("remote: "), l, '\n')
43
43
44 class doublepipe(object):
44 class doublepipe(object):
45 """Operate a side-channel pipe in addition of a main one
45 """Operate a side-channel pipe in addition of a main one
46
46
47 The side-channel pipe contains server output to be forwarded to the
47 The side-channel pipe contains server output to be forwarded to the
48 user. The double pipe will behave as the "main" pipe, but will ensure the
48 user. The double pipe will behave as the "main" pipe, but will ensure the
49 content of the "side" pipe is properly processed while we wait for a
49 content of the "side" pipe is properly processed while we wait for a
50 blocking call on the "main" pipe.
50 blocking call on the "main" pipe.
51
51
52 If large amounts of data are read from "main", the forward will cease after
52 If large amounts of data are read from "main", the forward will cease after
53 the first bytes start to appear. This simplifies the implementation
53 the first bytes start to appear. This simplifies the implementation
54 without affecting actual output of sshpeer too much, as we rarely issue
54 without affecting actual output of sshpeer too much, as we rarely issue
55 large reads for data not yet emitted by the server.
55 large reads for data not yet emitted by the server.
56
56
57 The main pipe is expected to be a 'bufferedinputpipe' from the util module
57 The main pipe is expected to be a 'bufferedinputpipe' from the util module
58 that handles all the OS-specific bits. This class lives in this module
58 that handles all the OS-specific bits. This class lives in this module
59 because it focuses on behavior specific to the ssh protocol."""
59 because it focuses on behavior specific to the ssh protocol."""
60
60
61 def __init__(self, ui, main, side):
61 def __init__(self, ui, main, side):
62 self._ui = ui
62 self._ui = ui
63 self._main = main
63 self._main = main
64 self._side = side
64 self._side = side
65
65
66 def _wait(self):
66 def _wait(self):
67 """wait until some data are available on main or side
67 """wait until some data are available on main or side
68
68
69 return a pair of booleans (ismainready, issideready)
69 return a pair of booleans (ismainready, issideready)
70
70
71 (This will only wait for data if the setup is supported by `util.poll`)
71 (This will only wait for data if the setup is supported by `util.poll`)
72 """
72 """
73 if (isinstance(self._main, util.bufferedinputpipe) and
73 if (isinstance(self._main, util.bufferedinputpipe) and
74 self._main.hasbuffer):
74 self._main.hasbuffer):
75 # Main has data. Assume side is worth poking at.
75 # Main has data. Assume side is worth poking at.
76 return True, True
76 return True, True
77
77
78 fds = [self._main.fileno(), self._side.fileno()]
78 fds = [self._main.fileno(), self._side.fileno()]
79 try:
79 try:
80 act = util.poll(fds)
80 act = util.poll(fds)
81 except NotImplementedError:
81 except NotImplementedError:
82 # not yet supported case; assume all have data.
82 # not yet supported case; assume all have data.
83 act = fds
83 act = fds
84 return (self._main.fileno() in act, self._side.fileno() in act)
84 return (self._main.fileno() in act, self._side.fileno() in act)
85
85
86 def write(self, data):
86 def write(self, data):
87 return self._call('write', data)
87 return self._call('write', data)
88
88
89 def read(self, size):
89 def read(self, size):
90 r = self._call('read', size)
90 r = self._call('read', size)
91 if size != 0 and not r:
91 if size != 0 and not r:
92 # We've observed a condition that indicates the
92 # We've observed a condition that indicates the
93 # stdout closed unexpectedly. Check stderr one
93 # stdout closed unexpectedly. Check stderr one
94 # more time and snag anything that's there before
94 # more time and snag anything that's there before
95 # letting anyone know the main part of the pipe
95 # letting anyone know the main part of the pipe
96 # closed prematurely.
96 # closed prematurely.
97 _forwardoutput(self._ui, self._side)
97 _forwardoutput(self._ui, self._side)
98 return r
98 return r
99
99
100 def readline(self):
100 def readline(self):
101 return self._call('readline')
101 return self._call('readline')
102
102
103 def _call(self, methname, data=None):
103 def _call(self, methname, data=None):
104 """call <methname> on "main", forward output of "side" while blocking
104 """call <methname> on "main", forward output of "side" while blocking
105 """
105 """
106 # data can be '' or 0
106 # data can be '' or 0
107 if (data is not None and not data) or self._main.closed:
107 if (data is not None and not data) or self._main.closed:
108 _forwardoutput(self._ui, self._side)
108 _forwardoutput(self._ui, self._side)
109 return ''
109 return ''
110 while True:
110 while True:
111 mainready, sideready = self._wait()
111 mainready, sideready = self._wait()
112 if sideready:
112 if sideready:
113 _forwardoutput(self._ui, self._side)
113 _forwardoutput(self._ui, self._side)
114 if mainready:
114 if mainready:
115 meth = getattr(self._main, methname)
115 meth = getattr(self._main, methname)
116 if data is None:
116 if data is None:
117 return meth()
117 return meth()
118 else:
118 else:
119 return meth(data)
119 return meth(data)
120
120
121 def close(self):
121 def close(self):
122 return self._main.close()
122 return self._main.close()
123
123
124 def flush(self):
124 def flush(self):
125 return self._main.flush()
125 return self._main.flush()
126
126
127 def _cleanuppipes(ui, pipei, pipeo, pipee):
127 def _cleanuppipes(ui, pipei, pipeo, pipee):
128 """Clean up pipes used by an SSH connection."""
128 """Clean up pipes used by an SSH connection."""
129 if pipeo:
129 if pipeo:
130 pipeo.close()
130 pipeo.close()
131 if pipei:
131 if pipei:
132 pipei.close()
132 pipei.close()
133
133
134 if pipee:
134 if pipee:
135 # Try to read from the err descriptor until EOF.
135 # Try to read from the err descriptor until EOF.
136 try:
136 try:
137 for l in pipee:
137 for l in pipee:
138 ui.status(_('remote: '), l)
138 ui.status(_('remote: '), l)
139 except (IOError, ValueError):
139 except (IOError, ValueError):
140 pass
140 pass
141
141
142 pipee.close()
142 pipee.close()
143
143
144 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
144 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
145 """Create an SSH connection to a server.
145 """Create an SSH connection to a server.
146
146
147 Returns a tuple of (process, stdin, stdout, stderr) for the
147 Returns a tuple of (process, stdin, stdout, stderr) for the
148 spawned process.
148 spawned process.
149 """
149 """
150 cmd = '%s %s %s' % (
150 cmd = '%s %s %s' % (
151 sshcmd,
151 sshcmd,
152 args,
152 args,
153 procutil.shellquote('%s -R %s serve --stdio' % (
153 procutil.shellquote('%s -R %s serve --stdio' % (
154 _serverquote(remotecmd), _serverquote(path))))
154 _serverquote(remotecmd), _serverquote(path))))
155
155
156 ui.debug('running %s\n' % cmd)
156 ui.debug('running %s\n' % cmd)
157 cmd = procutil.quotecommand(cmd)
157 cmd = procutil.quotecommand(cmd)
158
158
159 # no buffering allows the use of 'select'
159 # no buffering allows the use of 'select'
160 # feel free to remove buffering and select usage when we ultimately
160 # feel free to remove buffering and select usage when we ultimately
161 # move to threading.
161 # move to threading.
162 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
162 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
163
163
164 return proc, stdin, stdout, stderr
164 return proc, stdin, stdout, stderr
165
165
166 def _performhandshake(ui, stdin, stdout, stderr):
166 def _performhandshake(ui, stdin, stdout, stderr):
167 def badresponse():
167 def badresponse():
168 # Flush any output on stderr.
168 # Flush any output on stderr.
169 _forwardoutput(ui, stderr)
169 _forwardoutput(ui, stderr)
170
170
171 msg = _('no suitable response from remote hg')
171 msg = _('no suitable response from remote hg')
172 hint = ui.config('ui', 'ssherrorhint')
172 hint = ui.config('ui', 'ssherrorhint')
173 raise error.RepoError(msg, hint=hint)
173 raise error.RepoError(msg, hint=hint)
174
174
175 # The handshake consists of sending wire protocol commands in reverse
175 # The handshake consists of sending wire protocol commands in reverse
176 # order of protocol implementation and then sniffing for a response
176 # order of protocol implementation and then sniffing for a response
177 # to one of them.
177 # to one of them.
178 #
178 #
179 # Those commands (from oldest to newest) are:
179 # Those commands (from oldest to newest) are:
180 #
180 #
181 # ``between``
181 # ``between``
182 # Asks for the set of revisions between a pair of revisions. Command
182 # Asks for the set of revisions between a pair of revisions. Command
183 # present in all Mercurial server implementations.
183 # present in all Mercurial server implementations.
184 #
184 #
185 # ``hello``
185 # ``hello``
186 # Instructs the server to advertise its capabilities. Introduced in
186 # Instructs the server to advertise its capabilities. Introduced in
187 # Mercurial 0.9.1.
187 # Mercurial 0.9.1.
188 #
188 #
189 # ``upgrade``
189 # ``upgrade``
190 # Requests upgrade from default transport protocol version 1 to
190 # Requests upgrade from default transport protocol version 1 to
191 # a newer version. Introduced in Mercurial 4.6 as an experimental
191 # a newer version. Introduced in Mercurial 4.6 as an experimental
192 # feature.
192 # feature.
193 #
193 #
194 # The ``between`` command is issued with a request for the null
194 # The ``between`` command is issued with a request for the null
195 # range. If the remote is a Mercurial server, this request will
195 # range. If the remote is a Mercurial server, this request will
196 # generate a specific response: ``1\n\n``. This represents the
196 # generate a specific response: ``1\n\n``. This represents the
197 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
197 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
198 # in the output stream and know this is the response to ``between``
198 # in the output stream and know this is the response to ``between``
199 # and we're at the end of our handshake reply.
199 # and we're at the end of our handshake reply.
200 #
200 #
201 # The response to the ``hello`` command will be a line with the
201 # The response to the ``hello`` command will be a line with the
202 # length of the value returned by that command followed by that
202 # length of the value returned by that command followed by that
203 # value. If the server doesn't support ``hello`` (which should be
203 # value. If the server doesn't support ``hello`` (which should be
204 # rare), that line will be ``0\n``. Otherwise, the value will contain
204 # rare), that line will be ``0\n``. Otherwise, the value will contain
205 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
205 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
206 # the capabilities of the server.
206 # the capabilities of the server.
207 #
207 #
208 # The ``upgrade`` command isn't really a command in the traditional
208 # The ``upgrade`` command isn't really a command in the traditional
209 # sense of version 1 of the transport because it isn't using the
209 # sense of version 1 of the transport because it isn't using the
210 # proper mechanism for formatting arguments: instead, it just encodes
210 # proper mechanism for formatting arguments: instead, it just encodes
211 # arguments on the line, delimited by spaces.
211 # arguments on the line, delimited by spaces.
212 #
212 #
213 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
213 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
214 # If the server doesn't support protocol upgrades, it will reply to
214 # If the server doesn't support protocol upgrades, it will reply to
215 # this line with ``0\n``. Otherwise, it emits an
215 # this line with ``0\n``. Otherwise, it emits an
216 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
216 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
217 # Content immediately following this line describes additional
217 # Content immediately following this line describes additional
218 # protocol and server state.
218 # protocol and server state.
219 #
219 #
220 # In addition to the responses to our command requests, the server
220 # In addition to the responses to our command requests, the server
221 # may emit "banner" output on stdout. SSH servers are allowed to
221 # may emit "banner" output on stdout. SSH servers are allowed to
222 # print messages to stdout on login. Issuing commands on connection
222 # print messages to stdout on login. Issuing commands on connection
223 # allows us to flush this banner output from the server by scanning
223 # allows us to flush this banner output from the server by scanning
224 # for output to our well-known ``between`` command. Of course, if
224 # for output to our well-known ``between`` command. Of course, if
225 # the banner contains ``1\n\n``, this will throw off our detection.
225 # the banner contains ``1\n\n``, this will throw off our detection.
226
226
227 requestlog = ui.configbool('devel', 'debug.peer-request')
227 requestlog = ui.configbool('devel', 'debug.peer-request')
228
228
229 # Generate a random token to help identify responses to version 2
229 # Generate a random token to help identify responses to version 2
230 # upgrade request.
230 # upgrade request.
231 token = pycompat.sysbytes(str(uuid.uuid4()))
231 token = pycompat.sysbytes(str(uuid.uuid4()))
232 upgradecaps = [
232 upgradecaps = [
233 ('proto', wireprotoserver.SSHV2),
233 ('proto', wireprotoserver.SSHV2),
234 ]
234 ]
235 upgradecaps = util.urlreq.urlencode(upgradecaps)
235 upgradecaps = util.urlreq.urlencode(upgradecaps)
236
236
237 try:
237 try:
238 pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
238 pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
239 handshake = [
239 handshake = [
240 'hello\n',
240 'hello\n',
241 'between\n',
241 'between\n',
242 'pairs %d\n' % len(pairsarg),
242 'pairs %d\n' % len(pairsarg),
243 pairsarg,
243 pairsarg,
244 ]
244 ]
245
245
246 # Request upgrade to version 2 if configured.
246 # Request upgrade to version 2 if configured.
247 if ui.configbool('experimental', 'sshpeer.advertise-v2'):
247 if ui.configbool('experimental', 'sshpeer.advertise-v2'):
248 ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
248 ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
249 handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
249 handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
250
250
251 if requestlog:
251 if requestlog:
252 ui.debug('devel-peer-request: hello\n')
252 ui.debug('devel-peer-request: hello\n')
253 ui.debug('sending hello command\n')
253 ui.debug('sending hello command\n')
254 if requestlog:
254 if requestlog:
255 ui.debug('devel-peer-request: between\n')
255 ui.debug('devel-peer-request: between\n')
256 ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
256 ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
257 ui.debug('sending between command\n')
257 ui.debug('sending between command\n')
258
258
259 stdin.write(''.join(handshake))
259 stdin.write(''.join(handshake))
260 stdin.flush()
260 stdin.flush()
261 except IOError:
261 except IOError:
262 badresponse()
262 badresponse()
263
263
264 # Assume version 1 of wire protocol by default.
264 # Assume version 1 of wire protocol by default.
265 protoname = wireprototypes.SSHV1
265 protoname = wireprototypes.SSHV1
266 reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
266 reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
267
267
268 lines = ['', 'dummy']
268 lines = ['', 'dummy']
269 max_noise = 500
269 max_noise = 500
270 while lines[-1] and max_noise:
270 while lines[-1] and max_noise:
271 try:
271 try:
272 l = stdout.readline()
272 l = stdout.readline()
273 _forwardoutput(ui, stderr)
273 _forwardoutput(ui, stderr)
274
274
275 # Look for reply to protocol upgrade request. It has a token
275 # Look for reply to protocol upgrade request. It has a token
276 # in it, so there should be no false positives.
276 # in it, so there should be no false positives.
277 m = reupgraded.match(l)
277 m = reupgraded.match(l)
278 if m:
278 if m:
279 protoname = m.group(1)
279 protoname = m.group(1)
280 ui.debug('protocol upgraded to %s\n' % protoname)
280 ui.debug('protocol upgraded to %s\n' % protoname)
281 # If an upgrade was handled, the ``hello`` and ``between``
281 # If an upgrade was handled, the ``hello`` and ``between``
282 # requests are ignored. The next output belongs to the
282 # requests are ignored. The next output belongs to the
283 # protocol, so stop scanning lines.
283 # protocol, so stop scanning lines.
284 break
284 break
285
285
286 # Otherwise it could be a banner, ``0\n`` response if server
286 # Otherwise it could be a banner, ``0\n`` response if server
287 # doesn't support upgrade.
287 # doesn't support upgrade.
288
288
289 if lines[-1] == '1\n' and l == '\n':
289 if lines[-1] == '1\n' and l == '\n':
290 break
290 break
291 if l:
291 if l:
292 ui.debug('remote: ', l)
292 ui.debug('remote: ', l)
293 lines.append(l)
293 lines.append(l)
294 max_noise -= 1
294 max_noise -= 1
295 except IOError:
295 except IOError:
296 badresponse()
296 badresponse()
297 else:
297 else:
298 badresponse()
298 badresponse()
299
299
300 caps = set()
300 caps = set()
301
301
302 # For version 1, we should see a ``capabilities`` line in response to the
302 # For version 1, we should see a ``capabilities`` line in response to the
303 # ``hello`` command.
303 # ``hello`` command.
304 if protoname == wireprototypes.SSHV1:
304 if protoname == wireprototypes.SSHV1:
305 for l in reversed(lines):
305 for l in reversed(lines):
306 # Look for response to ``hello`` command. Scan from the back so
306 # Look for response to ``hello`` command. Scan from the back so
307 # we don't misinterpret banner output as the command reply.
307 # we don't misinterpret banner output as the command reply.
308 if l.startswith('capabilities:'):
308 if l.startswith('capabilities:'):
309 caps.update(l[:-1].split(':')[1].split())
309 caps.update(l[:-1].split(':')[1].split())
310 break
310 break
311 elif protoname == wireprotoserver.SSHV2:
311 elif protoname == wireprotoserver.SSHV2:
312 # We see a line with number of bytes to follow and then a value
312 # We see a line with number of bytes to follow and then a value
313 # looking like ``capabilities: *``.
313 # looking like ``capabilities: *``.
314 line = stdout.readline()
314 line = stdout.readline()
315 try:
315 try:
316 valuelen = int(line)
316 valuelen = int(line)
317 except ValueError:
317 except ValueError:
318 badresponse()
318 badresponse()
319
319
320 capsline = stdout.read(valuelen)
320 capsline = stdout.read(valuelen)
321 if not capsline.startswith('capabilities: '):
321 if not capsline.startswith('capabilities: '):
322 badresponse()
322 badresponse()
323
323
324 ui.debug('remote: %s\n' % capsline)
324 ui.debug('remote: %s\n' % capsline)
325
325
326 caps.update(capsline.split(':')[1].split())
326 caps.update(capsline.split(':')[1].split())
327 # Trailing newline.
327 # Trailing newline.
328 stdout.read(1)
328 stdout.read(1)
329
329
330 # Error if we couldn't find capabilities, this means:
330 # Error if we couldn't find capabilities, this means:
331 #
331 #
332 # 1. Remote isn't a Mercurial server
332 # 1. Remote isn't a Mercurial server
333 # 2. Remote is a <0.9.1 Mercurial server
333 # 2. Remote is a <0.9.1 Mercurial server
334 # 3. Remote is a future Mercurial server that dropped ``hello``
334 # 3. Remote is a future Mercurial server that dropped ``hello``
335 # and other attempted handshake mechanisms.
335 # and other attempted handshake mechanisms.
336 if not caps:
336 if not caps:
337 badresponse()
337 badresponse()
338
338
339 # Flush any output on stderr before proceeding.
339 # Flush any output on stderr before proceeding.
340 _forwardoutput(ui, stderr)
340 _forwardoutput(ui, stderr)
341
341
342 return protoname, caps
342 return protoname, caps
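With the version 2 upgrade disabled and a banner-free server assumed, the handshake assembled above is a fixed byte sequence. A sketch of what the client writes (expected server replies noted in comments):

    pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)   # 81 bytes: the null range
    handshake = b''.join([
        b'hello\n',                     # reply: "<len>\n" + "capabilities: ...\n"
        b'between\n',
        b'pairs %d\n' % len(pairsarg),
        pairsarg,                       # reply: "1\n\n" for the null range
    ])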
343
343
344 class sshv1peer(wireproto.wirepeer):
344 class sshv1peer(wireproto.wirepeer):
345 def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
345 def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
346 autoreadstderr=True):
346 autoreadstderr=True):
347 """Create a peer from an existing SSH connection.
347 """Create a peer from an existing SSH connection.
348
348
349 ``proc`` is a handle on the underlying SSH process.
349 ``proc`` is a handle on the underlying SSH process.
350 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
350 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
351 pipes for that process.
351 pipes for that process.
352 ``caps`` is a set of capabilities supported by the remote.
352 ``caps`` is a set of capabilities supported by the remote.
353 ``autoreadstderr`` denotes whether to automatically read from
353 ``autoreadstderr`` denotes whether to automatically read from
354 stderr and to forward its output.
354 stderr and to forward its output.
355 """
355 """
356 self._url = url
356 self._url = url
357 self._ui = ui
357 self.ui = ui
358 # self._subprocess is unused. Keeping a handle on the process
358 # self._subprocess is unused. Keeping a handle on the process
359 # holds a reference and prevents it from being garbage collected.
359 # holds a reference and prevents it from being garbage collected.
360 self._subprocess = proc
360 self._subprocess = proc
361
361
362 # And we hook up our "doublepipe" wrapper to allow querying
362 # And we hook up our "doublepipe" wrapper to allow querying
363 # stderr any time we perform I/O.
363 # stderr any time we perform I/O.
364 if autoreadstderr:
364 if autoreadstderr:
365 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
365 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
366 stdin = doublepipe(ui, stdin, stderr)
366 stdin = doublepipe(ui, stdin, stderr)
367
367
368 self._pipeo = stdin
368 self._pipeo = stdin
369 self._pipei = stdout
369 self._pipei = stdout
370 self._pipee = stderr
370 self._pipee = stderr
371 self._caps = caps
371 self._caps = caps
372 self._autoreadstderr = autoreadstderr
372 self._autoreadstderr = autoreadstderr
373
373
374 # Commands that have a "framed" response where the first line of the
374 # Commands that have a "framed" response where the first line of the
375 # response contains the length of that response.
375 # response contains the length of that response.
376 _FRAMED_COMMANDS = {
376 _FRAMED_COMMANDS = {
377 'batch',
377 'batch',
378 }
378 }
379
379
380 # Begin of ipeerconnection interface.
380 # Begin of ipeerconnection interface.
381
381
382 @util.propertycache
383 def ui(self):
384 return self._ui
385
386 def url(self):
382 def url(self):
387 return self._url
383 return self._url
388
384
389 def local(self):
385 def local(self):
390 return None
386 return None
391
387
392 def peer(self):
388 def peer(self):
393 return self
389 return self
394
390
395 def canpush(self):
391 def canpush(self):
396 return True
392 return True
397
393
398 def close(self):
394 def close(self):
399 pass
395 pass
400
396
401 # End of ipeerconnection interface.
397 # End of ipeerconnection interface.
402
398
403 # Begin of ipeercommands interface.
399 # Begin of ipeercommands interface.
404
400
405 def capabilities(self):
401 def capabilities(self):
406 return self._caps
402 return self._caps
407
403
408 # End of ipeercommands interface.
404 # End of ipeercommands interface.
409
405
410 def _readerr(self):
406 def _readerr(self):
411 _forwardoutput(self.ui, self._pipee)
407 _forwardoutput(self.ui, self._pipee)
412
408
413 def _abort(self, exception):
409 def _abort(self, exception):
414 self._cleanup()
410 self._cleanup()
415 raise exception
411 raise exception
416
412
417 def _cleanup(self):
413 def _cleanup(self):
418 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
414 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
419
415
420 __del__ = _cleanup
416 __del__ = _cleanup
421
417
422 def _sendrequest(self, cmd, args, framed=False):
418 def _sendrequest(self, cmd, args, framed=False):
423 if (self.ui.debugflag
419 if (self.ui.debugflag
424 and self.ui.configbool('devel', 'debug.peer-request')):
420 and self.ui.configbool('devel', 'debug.peer-request')):
425 dbg = self.ui.debug
421 dbg = self.ui.debug
426 line = 'devel-peer-request: %s\n'
422 line = 'devel-peer-request: %s\n'
427 dbg(line % cmd)
423 dbg(line % cmd)
428 for key, value in sorted(args.items()):
424 for key, value in sorted(args.items()):
429 if not isinstance(value, dict):
425 if not isinstance(value, dict):
430 dbg(line % ' %s: %d bytes' % (key, len(value)))
426 dbg(line % ' %s: %d bytes' % (key, len(value)))
431 else:
427 else:
432 for dk, dv in sorted(value.items()):
428 for dk, dv in sorted(value.items()):
433 dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
429 dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
434 self.ui.debug("sending %s command\n" % cmd)
430 self.ui.debug("sending %s command\n" % cmd)
435 self._pipeo.write("%s\n" % cmd)
431 self._pipeo.write("%s\n" % cmd)
436 _func, names = wireproto.commands[cmd]
432 _func, names = wireproto.commands[cmd]
437 keys = names.split()
433 keys = names.split()
438 wireargs = {}
434 wireargs = {}
439 for k in keys:
435 for k in keys:
440 if k == '*':
436 if k == '*':
441 wireargs['*'] = args
437 wireargs['*'] = args
442 break
438 break
443 else:
439 else:
444 wireargs[k] = args[k]
440 wireargs[k] = args[k]
445 del args[k]
441 del args[k]
446 for k, v in sorted(wireargs.iteritems()):
442 for k, v in sorted(wireargs.iteritems()):
447 self._pipeo.write("%s %d\n" % (k, len(v)))
443 self._pipeo.write("%s %d\n" % (k, len(v)))
448 if isinstance(v, dict):
444 if isinstance(v, dict):
449 for dk, dv in v.iteritems():
445 for dk, dv in v.iteritems():
450 self._pipeo.write("%s %d\n" % (dk, len(dv)))
446 self._pipeo.write("%s %d\n" % (dk, len(dv)))
451 self._pipeo.write(dv)
447 self._pipeo.write(dv)
452 else:
448 else:
453 self._pipeo.write(v)
449 self._pipeo.write(v)
454 self._pipeo.flush()
450 self._pipeo.flush()
455
451
456 # We know exactly how many bytes are in the response. So return a proxy
452 # We know exactly how many bytes are in the response. So return a proxy
457 # around the raw output stream that allows reading exactly this many
453 # around the raw output stream that allows reading exactly this many
458 # bytes. Callers then can read() without fear of overrunning the
454 # bytes. Callers then can read() without fear of overrunning the
459 # response.
455 # response.
460 if framed:
456 if framed:
461 amount = self._getamount()
457 amount = self._getamount()
462 return util.cappedreader(self._pipei, amount)
458 return util.cappedreader(self._pipei, amount)
463
459
464 return self._pipei
460 return self._pipei
465
461
466 def _callstream(self, cmd, **args):
462 def _callstream(self, cmd, **args):
467 args = pycompat.byteskwargs(args)
463 args = pycompat.byteskwargs(args)
468 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
464 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
469
465
470 def _callcompressable(self, cmd, **args):
466 def _callcompressable(self, cmd, **args):
471 args = pycompat.byteskwargs(args)
467 args = pycompat.byteskwargs(args)
472 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
468 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
473
469
474 def _call(self, cmd, **args):
470 def _call(self, cmd, **args):
475 args = pycompat.byteskwargs(args)
471 args = pycompat.byteskwargs(args)
476 return self._sendrequest(cmd, args, framed=True).read()
472 return self._sendrequest(cmd, args, framed=True).read()
477
473
478 def _callpush(self, cmd, fp, **args):
474 def _callpush(self, cmd, fp, **args):
479 # The server responds with an empty frame if the client should
475 # The server responds with an empty frame if the client should
480 # continue submitting the payload.
476 # continue submitting the payload.
481 r = self._call(cmd, **args)
477 r = self._call(cmd, **args)
482 if r:
478 if r:
483 return '', r
479 return '', r
484
480
485 # The payload consists of frames with content followed by an empty
481 # The payload consists of frames with content followed by an empty
486 # frame.
482 # frame.
487 for d in iter(lambda: fp.read(4096), ''):
483 for d in iter(lambda: fp.read(4096), ''):
488 self._writeframed(d)
484 self._writeframed(d)
489 self._writeframed("", flush=True)
485 self._writeframed("", flush=True)
490
486
491 # In case of success, there is an empty frame and a frame containing
487 # In case of success, there is an empty frame and a frame containing
492 # the integer result (as a string).
488 # the integer result (as a string).
493 # In case of error, there is a non-empty frame containing the error.
489 # In case of error, there is a non-empty frame containing the error.
494 r = self._readframed()
490 r = self._readframed()
495 if r:
491 if r:
496 return '', r
492 return '', r
497 return self._readframed(), ''
493 return self._readframed(), ''
498
494
499 def _calltwowaystream(self, cmd, fp, **args):
495 def _calltwowaystream(self, cmd, fp, **args):
500 # The server responds with an empty frame if the client should
496 # The server responds with an empty frame if the client should
501 # continue submitting the payload.
497 # continue submitting the payload.
502 r = self._call(cmd, **args)
498 r = self._call(cmd, **args)
503 if r:
499 if r:
504 # XXX needs to be made better
500 # XXX needs to be made better
505 raise error.Abort(_('unexpected remote reply: %s') % r)
501 raise error.Abort(_('unexpected remote reply: %s') % r)
506
502
507 # The payload consists of frames with content followed by an empty
503 # The payload consists of frames with content followed by an empty
508 # frame.
504 # frame.
509 for d in iter(lambda: fp.read(4096), ''):
505 for d in iter(lambda: fp.read(4096), ''):
510 self._writeframed(d)
506 self._writeframed(d)
511 self._writeframed("", flush=True)
507 self._writeframed("", flush=True)
512
508
513 return self._pipei
509 return self._pipei
514
510
515 def _getamount(self):
511 def _getamount(self):
516 l = self._pipei.readline()
512 l = self._pipei.readline()
517 if l == '\n':
513 if l == '\n':
518 if self._autoreadstderr:
514 if self._autoreadstderr:
519 self._readerr()
515 self._readerr()
520 msg = _('check previous remote output')
516 msg = _('check previous remote output')
521 self._abort(error.OutOfBandError(hint=msg))
517 self._abort(error.OutOfBandError(hint=msg))
522 if self._autoreadstderr:
518 if self._autoreadstderr:
523 self._readerr()
519 self._readerr()
524 try:
520 try:
525 return int(l)
521 return int(l)
526 except ValueError:
522 except ValueError:
527 self._abort(error.ResponseError(_("unexpected response:"), l))
523 self._abort(error.ResponseError(_("unexpected response:"), l))
528
524
529 def _readframed(self):
525 def _readframed(self):
530 size = self._getamount()
526 size = self._getamount()
531 if not size:
527 if not size:
532 return b''
528 return b''
533
529
534 return self._pipei.read(size)
530 return self._pipei.read(size)
535
531
536 def _writeframed(self, data, flush=False):
532 def _writeframed(self, data, flush=False):
537 self._pipeo.write("%d\n" % len(data))
533 self._pipeo.write("%d\n" % len(data))
538 if data:
534 if data:
539 self._pipeo.write(data)
535 self._pipeo.write(data)
540 if flush:
536 if flush:
541 self._pipeo.flush()
537 self._pipeo.flush()
542 if self._autoreadstderr:
538 if self._autoreadstderr:
543 self._readerr()
539 self._readerr()
544
540
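_writeframed() and _readframed() above implement a simple length-prefixed framing. A tiny sketch of that encoding (the helper name is made up for illustration):

    def encodeframe(data):
        # ASCII decimal length, a newline, then exactly that many payload
        # bytes; a zero-length frame ("0\n") terminates a payload stream.
        return b'%d\n' % len(data) + data

    assert encodeframe(b'hello') == b'5\nhello'
    assert encodeframe(b'') == b'0\n'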
545 class sshv2peer(sshv1peer):
541 class sshv2peer(sshv1peer):
546 """A peer that speakers version 2 of the transport protocol."""
542 """A peer that speakers version 2 of the transport protocol."""
547 # Currently version 2 is identical to version 1 post handshake.
543 # Currently version 2 is identical to version 1 post handshake.
548 # And handshake is performed before the peer is instantiated. So
544 # And handshake is performed before the peer is instantiated. So
549 # we need no custom code.
545 # we need no custom code.
550
546
551 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
547 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
552 """Make a peer instance from existing pipes.
548 """Make a peer instance from existing pipes.
553
549
554 ``path`` and ``proc`` are stored on the eventual peer instance and may
550 ``path`` and ``proc`` are stored on the eventual peer instance and may
555 not be used for anything meaningful.
551 not be used for anything meaningful.
556
552
557 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
553 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
558 SSH server's stdio handles.
554 SSH server's stdio handles.
559
555
560 This function is factored out to allow creating peers that don't
556 This function is factored out to allow creating peers that don't
561 actually spawn a new process. It is useful for starting SSH protocol
557 actually spawn a new process. It is useful for starting SSH protocol
562 servers and clients via non-standard means, which can be useful for
558 servers and clients via non-standard means, which can be useful for
563 testing.
559 testing.
564 """
560 """
565 try:
561 try:
566 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
562 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
567 except Exception:
563 except Exception:
568 _cleanuppipes(ui, stdout, stdin, stderr)
564 _cleanuppipes(ui, stdout, stdin, stderr)
569 raise
565 raise
570
566
571 if protoname == wireprototypes.SSHV1:
567 if protoname == wireprototypes.SSHV1:
572 return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
568 return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
573 autoreadstderr=autoreadstderr)
569 autoreadstderr=autoreadstderr)
574 elif protoname == wireprototypes.SSHV2:
570 elif protoname == wireprototypes.SSHV2:
575 return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
571 return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
576 autoreadstderr=autoreadstderr)
572 autoreadstderr=autoreadstderr)
577 else:
573 else:
578 _cleanuppipes(ui, stdout, stdin, stderr)
574 _cleanuppipes(ui, stdout, stdin, stderr)
579 raise error.RepoError(_('unknown version of SSH protocol: %s') %
575 raise error.RepoError(_('unknown version of SSH protocol: %s') %
580 protoname)
576 protoname)
581
577
582 def instance(ui, path, create):
578 def instance(ui, path, create):
583 """Create an SSH peer.
579 """Create an SSH peer.
584
580
585 The returned object conforms to the ``wireproto.wirepeer`` interface.
581 The returned object conforms to the ``wireproto.wirepeer`` interface.
586 """
582 """
587 u = util.url(path, parsequery=False, parsefragment=False)
583 u = util.url(path, parsequery=False, parsefragment=False)
588 if u.scheme != 'ssh' or not u.host or u.path is None:
584 if u.scheme != 'ssh' or not u.host or u.path is None:
589 raise error.RepoError(_("couldn't parse location %s") % path)
585 raise error.RepoError(_("couldn't parse location %s") % path)
590
586
591 util.checksafessh(path)
587 util.checksafessh(path)
592
588
593 if u.passwd is not None:
589 if u.passwd is not None:
594 raise error.RepoError(_('password in URL not supported'))
590 raise error.RepoError(_('password in URL not supported'))
595
591
596 sshcmd = ui.config('ui', 'ssh')
592 sshcmd = ui.config('ui', 'ssh')
597 remotecmd = ui.config('ui', 'remotecmd')
593 remotecmd = ui.config('ui', 'remotecmd')
598 sshaddenv = dict(ui.configitems('sshenv'))
594 sshaddenv = dict(ui.configitems('sshenv'))
599 sshenv = procutil.shellenviron(sshaddenv)
595 sshenv = procutil.shellenviron(sshaddenv)
600 remotepath = u.path or '.'
596 remotepath = u.path or '.'
601
597
602 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
598 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
603
599
604 if create:
600 if create:
605 cmd = '%s %s %s' % (sshcmd, args,
601 cmd = '%s %s %s' % (sshcmd, args,
606 procutil.shellquote('%s init %s' %
602 procutil.shellquote('%s init %s' %
607 (_serverquote(remotecmd), _serverquote(remotepath))))
603 (_serverquote(remotecmd), _serverquote(remotepath))))
608 ui.debug('running %s\n' % cmd)
604 ui.debug('running %s\n' % cmd)
609 res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
605 res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
610 if res != 0:
606 if res != 0:
611 raise error.RepoError(_('could not create remote repo'))
607 raise error.RepoError(_('could not create remote repo'))
612
608
613 proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
609 proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
614 remotepath, sshenv)
610 remotepath, sshenv)
615
611
616 return makepeer(ui, path, proc, stdin, stdout, stderr)
612 return makepeer(ui, path, proc, stdin, stdout, stderr)
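Note the order of the checks in instance(): the location must parse as an ssh:// URL, util.checksafessh() guards against locations that would be interpreted as ssh options, and URLs carrying a password are rejected before any process is spawned. A small illustrative sketch of those failure modes (the URLs are made up):

from mercurial import error, sshpeer
from mercurial import ui as uimod

for path in (b'http://example.com/repo',              # wrong scheme
             b'ssh://user:secret@example.com/repo'):  # password in URL
    try:
        sshpeer.instance(uimod.ui(), path, create=False)
    except error.RepoError as inst:
        print(inst)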
@@ -1,102 +1,98 b''
1 from __future__ import absolute_import, print_function
1 from __future__ import absolute_import, print_function
2
2
3 from mercurial import (
3 from mercurial import (
4 error,
4 error,
5 pycompat,
5 pycompat,
6 ui as uimod,
6 ui as uimod,
7 util,
7 util,
8 wireproto,
8 wireproto,
9 wireprototypes,
9 wireprototypes,
10 )
10 )
11 stringio = util.stringio
11 stringio = util.stringio
12
12
13 class proto(object):
13 class proto(object):
14 def __init__(self, args):
14 def __init__(self, args):
15 self.args = args
15 self.args = args
16 self.name = 'dummyproto'
16 self.name = 'dummyproto'
17
17
18 def getargs(self, spec):
18 def getargs(self, spec):
19 args = self.args
19 args = self.args
20 args.setdefault(b'*', {})
20 args.setdefault(b'*', {})
21 names = spec.split()
21 names = spec.split()
22 return [args[n] for n in names]
22 return [args[n] for n in names]
23
23
24 def checkperm(self, perm):
24 def checkperm(self, perm):
25 pass
25 pass
26
26
27 wireprototypes.TRANSPORTS['dummyproto'] = {
27 wireprototypes.TRANSPORTS['dummyproto'] = {
28 'transport': 'dummy',
28 'transport': 'dummy',
29 'version': 1,
29 'version': 1,
30 }
30 }
31
31
32 class clientpeer(wireproto.wirepeer):
32 class clientpeer(wireproto.wirepeer):
33 def __init__(self, serverrepo, ui):
33 def __init__(self, serverrepo, ui):
34 self.serverrepo = serverrepo
34 self.serverrepo = serverrepo
35 self._ui = ui
35 self.ui = ui
36
37 @property
38 def ui(self):
39 return self._ui
40
36
41 def url(self):
37 def url(self):
42 return b'test'
38 return b'test'
43
39
44 def local(self):
40 def local(self):
45 return None
41 return None
46
42
47 def peer(self):
43 def peer(self):
48 return self
44 return self
49
45
50 def canpush(self):
46 def canpush(self):
51 return True
47 return True
52
48
53 def close(self):
49 def close(self):
54 pass
50 pass
55
51
56 def capabilities(self):
52 def capabilities(self):
57 return [b'batch']
53 return [b'batch']
58
54
59 def _call(self, cmd, **args):
55 def _call(self, cmd, **args):
60 args = pycompat.byteskwargs(args)
56 args = pycompat.byteskwargs(args)
61 res = wireproto.dispatch(self.serverrepo, proto(args), cmd)
57 res = wireproto.dispatch(self.serverrepo, proto(args), cmd)
62 if isinstance(res, wireprototypes.bytesresponse):
58 if isinstance(res, wireprototypes.bytesresponse):
63 return res.data
59 return res.data
64 elif isinstance(res, bytes):
60 elif isinstance(res, bytes):
65 return res
61 return res
66 else:
62 else:
67 raise error.Abort('dummy client does not support response type')
63 raise error.Abort('dummy client does not support response type')
68
64
69 def _callstream(self, cmd, **args):
65 def _callstream(self, cmd, **args):
70 return stringio(self._call(cmd, **args))
66 return stringio(self._call(cmd, **args))
71
67
72 @wireproto.batchable
68 @wireproto.batchable
73 def greet(self, name):
69 def greet(self, name):
74 f = wireproto.future()
70 f = wireproto.future()
75 yield {b'name': mangle(name)}, f
71 yield {b'name': mangle(name)}, f
76 yield unmangle(f.value)
72 yield unmangle(f.value)
77
73
78 class serverrepo(object):
74 class serverrepo(object):
79 def greet(self, name):
75 def greet(self, name):
80 return b"Hello, " + name
76 return b"Hello, " + name
81
77
82 def filtered(self, name):
78 def filtered(self, name):
83 return self
79 return self
84
80
85 def mangle(s):
81 def mangle(s):
86 return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
82 return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
87 def unmangle(s):
83 def unmangle(s):
88 return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
84 return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
89
85
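mangle() and unmangle() simply shift every byte up or down by one, which gives the test a cheap way to prove that arguments and results really pass through the encoding hooks on both sides. Worked out by hand (these asserts are not part of the test itself):

assert mangle(b'Foobar') == b'Gppcbs'
assert unmangle(mangle(b'Foobar')) == b'Foobar'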
90 def greet(repo, proto, name):
86 def greet(repo, proto, name):
91 return mangle(repo.greet(unmangle(name)))
87 return mangle(repo.greet(unmangle(name)))
92
88
93 wireproto.commands[b'greet'] = (greet, b'name',)
89 wireproto.commands[b'greet'] = (greet, b'name',)
94
90
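Registering the handler in wireproto.commands is what lets wireproto.dispatch() route the client's call here; the second element of the tuple is the whitespace-separated argument spec that proto.getargs() consumes. Another command registered the same way would look like this purely illustrative sketch (not part of the test):

def farewell(repo, proto, name):
    # Same shape as greet(): decode the argument, build a reply, re-encode it.
    return mangle(b'Goodbye, ' + unmangle(name))

wireproto.commands[b'farewell'] = (farewell, b'name',)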
95 srv = serverrepo()
91 srv = serverrepo()
96 clt = clientpeer(srv, uimod.ui())
92 clt = clientpeer(srv, uimod.ui())
97
93
98 print(clt.greet(b"Foobar"))
94 print(clt.greet(b"Foobar"))
99 b = clt.iterbatch()
95 b = clt.iterbatch()
100 list(map(b.greet, (b'Fo, =;:<o', b'Bar')))
96 list(map(b.greet, (b'Fo, =;:<o', b'Bar')))
101 b.submit()
97 b.submit()
102 print([r for r in b.results()])
98 print([r for r in b.results()])
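Tracing the round trip by hand: the client mangles the name, the server-side greet() unmangles it, prefixes b'Hello, ', mangles the reply, and the batchable client method unmangles it again, so both the direct call and the batch should come back as plain greetings. A hedged expectation, derived from the code above rather than from the test's recorded output:

assert clt.greet(b'Foobar') == b'Hello, Foobar'

b2 = clt.iterbatch()
b2.greet(b'Bar')
b2.submit()
assert list(b2.results()) == [b'Hello, Bar']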