peer: make ui an attribute...
Gregory Szorc
r37337:e826fe7a default
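This changeset removes the cached ui property from the peer classes and instead assigns ui directly as an instance attribute in __init__. A minimal sketch of the before/after pattern (class names are illustrative, not the real ones):

    # before: private attribute exposed through a cached property
    class peerbefore(object):
        def __init__(self, ui):
            self._ui = ui

        @property                  # stand-in for util.propertycache
        def ui(self):
            return self._ui

    # after: a plain attribute, set once in __init__, read directly
    class peerafter(object):
        def __init__(self, ui):
            self.ui = ui

The diff below applies this to httppeer.httppeer and localrepo.localpeer.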
mercurial/httppeer.py
@@ -1,506 +1,502 @@
1 1 # httppeer.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import io
13 13 import os
14 14 import socket
15 15 import struct
16 16 import tempfile
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 bundle2,
21 21 error,
22 22 httpconnection,
23 23 pycompat,
24 24 statichttprepo,
25 25 url as urlmod,
26 26 util,
27 27 wireproto,
28 28 )
29 29
30 30 httplib = util.httplib
31 31 urlerr = util.urlerr
32 32 urlreq = util.urlreq
33 33
34 34 def encodevalueinheaders(value, header, limit):
35 35 """Encode a string value into multiple HTTP headers.
36 36
37 37 ``value`` will be encoded into 1 or more HTTP headers with the names
38 38 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
39 39 name + value will be at most ``limit`` bytes long.
40 40
41 41 Returns an iterable of 2-tuples consisting of header names and
42 42 values as native strings.
43 43 """
44 44 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
45 45 # not bytes. This function always takes bytes in as arguments.
46 46 fmt = pycompat.strurl(header) + r'-%s'
47 47 # Note: it is *NOT* a bug that the last bit here is a bytestring
48 48 # and not a unicode: we're just getting the encoded length anyway,
49 49 # and using an r-string to make it portable between Python 2 and 3
50 50 # doesn't work because then the \r is a literal backslash-r
51 51 # instead of a carriage return.
52 52 valuelen = limit - len(fmt % r'000') - len(': \r\n')
53 53 result = []
54 54
55 55 n = 0
56 56 for i in xrange(0, len(value), valuelen):
57 57 n += 1
58 58 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
59 59
60 60 return result
61 61
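As an aside, a standalone Python 3 sketch of the splitting logic above; splitheaders is a hypothetical stand-in, not a Mercurial API:

    def splitheaders(value, header, limit):
        # mirror encodevalueinheaders(): reserve room for "name-NNN: ...\r\n"
        fmt = header + '-%s'
        valuelen = limit - len(fmt % '000') - len(': \r\n')
        return [(fmt % (n + 1), value[i:i + valuelen])
                for n, i in enumerate(range(0, len(value), valuelen))]

    # splitheaders('x' * 40, 'X-HgArg', 24) yields X-HgArg-1 .. X-HgArg-5,
    # each carrying at most 9 value bytes so the full header line fits in 24.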
62 62 def _wraphttpresponse(resp):
63 63 """Wrap an HTTPResponse with common error handlers.
64 64
65 65 This ensures that any I/O from any consumer raises the appropriate
66 66 error and messaging.
67 67 """
68 68 origread = resp.read
69 69
70 70 class readerproxy(resp.__class__):
71 71 def read(self, size=None):
72 72 try:
73 73 return origread(size)
74 74 except httplib.IncompleteRead as e:
75 75 # e.expected is an integer if length known or None otherwise.
76 76 if e.expected:
77 77 msg = _('HTTP request error (incomplete response; '
78 78 'expected %d bytes got %d)') % (e.expected,
79 79 len(e.partial))
80 80 else:
81 81 msg = _('HTTP request error (incomplete response)')
82 82
83 83 raise error.PeerTransportError(
84 84 msg,
85 85 hint=_('this may be an intermittent network failure; '
86 86 'if the error persists, consider contacting the '
87 87 'network or server operator'))
88 88 except httplib.HTTPException as e:
89 89 raise error.PeerTransportError(
90 90 _('HTTP request error (%s)') % e,
91 91 hint=_('this may be an intermittent network failure; '
92 92 'if the error persists, consider contacting the '
93 93 'network or server operator'))
94 94
95 95 resp.__class__ = readerproxy
96 96
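The __class__ swap above keeps the response object's identity (and isinstance checks) intact while overriding read(). A standalone sketch of the same pattern, with a plain Python class standing in for HTTPResponse:

    class reader(object):
        def __init__(self, data):
            self._data = data

        def read(self, size=-1):
            data, self._data = self._data, b''
            return data

    def wrapread(obj, onread):
        origread = obj.read

        class readerproxy(obj.__class__):
            def read(self, size=-1):
                data = origread(size)
                onread(len(data))   # stand-in for the error translation above
                return data

        obj.__class__ = readerproxy
        return obj

    fp = wrapread(reader(b'hello'), print)
    assert fp.read() == b'hello'    # reports 5 via onread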
97 97 class _multifile(object):
98 98 def __init__(self, *fileobjs):
99 99 for f in fileobjs:
100 100 if not util.safehasattr(f, 'length'):
101 101 raise ValueError(
102 102 '_multifile only supports file objects that '
103 103 'have a length but this one does not:', type(f), f)
104 104 self._fileobjs = fileobjs
105 105 self._index = 0
106 106
107 107 @property
108 108 def length(self):
109 109 return sum(f.length for f in self._fileobjs)
110 110
111 111 def read(self, amt=None):
112 112 if amt <= 0:
113 113 return ''.join(f.read() for f in self._fileobjs)
114 114 parts = []
115 115 while amt and self._index < len(self._fileobjs):
116 116 parts.append(self._fileobjs[self._index].read(amt))
117 117 got = len(parts[-1])
118 118 if got < amt:
119 119 self._index += 1
120 120 amt -= got
121 121 return ''.join(parts)
122 122
123 123 def seek(self, offset, whence=os.SEEK_SET):
124 124 if whence != os.SEEK_SET:
125 125 raise NotImplementedError(
126 126 '_multifile does not support anything other'
127 127 ' than os.SEEK_SET for whence on seek()')
128 128 if offset != 0:
129 129 raise NotImplementedError(
130 130 '_multifile only supports seeking to start, but that '
131 131 'could be fixed if you need it')
132 132 for f in self._fileobjs:
133 133 f.seek(0)
134 134 self._index = 0
135 135
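A standalone Python 3 sketch of what _multifile provides: several readers that each expose a length attribute are chained into one sequential stream, so the combined size can be announced up front (e.g. as Content-Length). Illustrative only, not the real class:

    import io

    def lengthio(data):
        f = io.BytesIO(data)
        f.length = len(data)        # the length attribute _multifile requires
        return f

    class multifile(object):
        def __init__(self, *fileobjs):
            self._fileobjs = list(fileobjs)
            self.length = sum(f.length for f in fileobjs)
            self._index = 0

        def read(self, amt=-1):
            if amt is None or amt <= 0:
                return b''.join(f.read() for f in self._fileobjs[self._index:])
            parts = []
            while amt and self._index < len(self._fileobjs):
                part = self._fileobjs[self._index].read(amt)
                if len(part) < amt:
                    self._index += 1    # current file exhausted, move on
                amt -= len(part)
                parts.append(part)
            return b''.join(parts)

    body = multifile(lengthio(b'args&'), lengthio(b'bundle'))
    assert body.length == 11 and body.read(-1) == b'args&bundle'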
136 136 class httppeer(wireproto.wirepeer):
137 137 def __init__(self, ui, path, url, opener):
138 self._ui = ui
138 self.ui = ui
139 139 self._path = path
140 140 self._url = url
141 141 self._caps = None
142 142 self._urlopener = opener
143 143 # This is its own attribute to facilitate extensions overriding
144 144 # the default type.
145 145 self._requestbuilder = urlreq.request
146 146
147 147 def __del__(self):
148 148 for h in self._urlopener.handlers:
149 149 h.close()
150 150 getattr(h, "close_all", lambda: None)()
151 151
152 152 def _openurl(self, req):
153 if (self._ui.debugflag
154 and self._ui.configbool('devel', 'debug.peer-request')):
155 dbg = self._ui.debug
153 if (self.ui.debugflag
154 and self.ui.configbool('devel', 'debug.peer-request')):
155 dbg = self.ui.debug
156 156 line = 'devel-peer-request: %s\n'
157 157 dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
158 158 hgargssize = None
159 159
160 160 for header, value in sorted(req.header_items()):
161 161 if header.startswith('X-hgarg-'):
162 162 if hgargssize is None:
163 163 hgargssize = 0
164 164 hgargssize += len(value)
165 165 else:
166 166 dbg(line % ' %s %s' % (header, value))
167 167
168 168 if hgargssize is not None:
169 169 dbg(line % ' %d bytes of commands arguments in headers'
170 170 % hgargssize)
171 171
172 172 if req.has_data():
173 173 data = req.get_data()
174 174 length = getattr(data, 'length', None)
175 175 if length is None:
176 176 length = len(data)
177 177 dbg(line % ' %d bytes of data' % length)
178 178
179 179 start = util.timer()
180 180
181 181 ret = self._urlopener.open(req)
182 if self._ui.configbool('devel', 'debug.peer-request'):
182 if self.ui.configbool('devel', 'debug.peer-request'):
183 183 dbg(line % ' finished in %.4f seconds (%s)'
184 184 % (util.timer() - start, ret.code))
185 185 return ret
186 186
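The request logging above only fires when --debug is combined with a devel knob; a hedged hgrc sketch:

    [devel]
    debug.peer-request = true

With both set, every request is reported as devel-peer-request: lines covering the method and URL, the aggregate size of X-hgarg-* headers, the body size, and, once the response arrives, the elapsed time and status code.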
187 187 # Begin of ipeerconnection interface.
188 188
189 @util.propertycache
190 def ui(self):
191 return self._ui
192
193 189 def url(self):
194 190 return self._path
195 191
196 192 def local(self):
197 193 return None
198 194
199 195 def peer(self):
200 196 return self
201 197
202 198 def canpush(self):
203 199 return True
204 200
205 201 def close(self):
206 202 pass
207 203
208 204 # End of ipeerconnection interface.
209 205
210 206 # Begin of ipeercommands interface.
211 207
212 208 def capabilities(self):
213 209 # self._fetchcaps() should have been called as part of peer
214 210 # handshake. So self._caps should always be set.
215 211 assert self._caps is not None
216 212 return self._caps
217 213
218 214 # End of ipeercommands interface.
219 215
220 216 # look up capabilities only when needed
221 217
222 218 def _fetchcaps(self):
223 219 self._caps = set(self._call('capabilities').split())
224 220
225 221 def _callstream(self, cmd, _compressible=False, **args):
226 222 args = pycompat.byteskwargs(args)
227 223 if cmd == 'pushkey':
228 224 args['data'] = ''
229 225 data = args.pop('data', None)
230 226 headers = args.pop('headers', {})
231 227
232 228 self.ui.debug("sending %s command\n" % cmd)
233 229 q = [('cmd', cmd)]
234 230 headersize = 0
235 231 varyheaders = []
236 232 # Important: don't use self.capable() here or else you end up
237 233 # with infinite recursion when trying to look up capabilities
238 234 # for the first time.
239 235 postargsok = self._caps is not None and 'httppostargs' in self._caps
240 236
241 237 # Send arguments via POST.
242 238 if postargsok and args:
243 239 strargs = urlreq.urlencode(sorted(args.items()))
244 240 if not data:
245 241 data = strargs
246 242 else:
247 243 if isinstance(data, bytes):
248 244 i = io.BytesIO(data)
249 245 i.length = len(data)
250 246 data = i
251 247 argsio = io.BytesIO(strargs)
252 248 argsio.length = len(strargs)
253 249 data = _multifile(argsio, data)
254 250 headers[r'X-HgArgs-Post'] = len(strargs)
255 251 elif args:
256 252 # Calling self.capable() can loop forever if we are calling
257 253 # "capabilities". But that command should never accept wire
258 254 # protocol arguments. So this should never happen.
259 255 assert cmd != 'capabilities'
260 256 httpheader = self.capable('httpheader')
261 257 if httpheader:
262 258 headersize = int(httpheader.split(',', 1)[0])
263 259
264 260 # Send arguments via HTTP headers.
265 261 if headersize > 0:
266 262 # The headers can typically carry more data than the URL.
267 263 encargs = urlreq.urlencode(sorted(args.items()))
268 264 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
269 265 headersize):
270 266 headers[header] = value
271 267 varyheaders.append(header)
272 268 # Send arguments via query string (Mercurial <1.9).
273 269 else:
274 270 q += sorted(args.items())
275 271
276 272 qs = '?%s' % urlreq.urlencode(q)
277 273 cu = "%s%s" % (self._url, qs)
278 274 size = 0
279 275 if util.safehasattr(data, 'length'):
280 276 size = data.length
281 277 elif data is not None:
282 278 size = len(data)
283 279 if data is not None and r'Content-Type' not in headers:
284 280 headers[r'Content-Type'] = r'application/mercurial-0.1'
285 281
286 282 # Tell the server we accept application/mercurial-0.2 and multiple
287 283 # compression formats if the server is capable of emitting those
288 284 # payloads.
289 285 protoparams = []
290 286
291 287 mediatypes = set()
292 288 if self._caps is not None:
293 289 mt = self.capable('httpmediatype')
294 290 if mt:
295 291 protoparams.append('0.1')
296 292 mediatypes = set(mt.split(','))
297 293
298 294 if '0.2tx' in mediatypes:
299 295 protoparams.append('0.2')
300 296
301 297 if '0.2tx' in mediatypes and self.capable('compression'):
302 298 # We /could/ compare supported compression formats and prune
303 299 # non-mutually supported or error if nothing is mutually supported.
304 300 # For now, send the full list to the server and have it error.
305 301 comps = [e.wireprotosupport().name for e in
306 302 util.compengines.supportedwireengines(util.CLIENTROLE)]
307 303 protoparams.append('comp=%s' % ','.join(comps))
308 304
309 305 if protoparams:
310 306 protoheaders = encodevalueinheaders(' '.join(protoparams),
311 307 'X-HgProto',
312 308 headersize or 1024)
313 309 for header, value in protoheaders:
314 310 headers[header] = value
315 311 varyheaders.append(header)
316 312
317 313 if varyheaders:
318 314 headers[r'Vary'] = r','.join(varyheaders)
319 315
320 316 req = self._requestbuilder(pycompat.strurl(cu), data, headers)
321 317
322 318 if data is not None:
323 319 self.ui.debug("sending %d bytes\n" % size)
324 320 req.add_unredirected_header(r'Content-Length', r'%d' % size)
325 321 try:
326 322 resp = self._openurl(req)
327 323 except urlerr.httperror as inst:
328 324 if inst.code == 401:
329 325 raise error.Abort(_('authorization failed'))
330 326 raise
331 327 except httplib.HTTPException as inst:
332 328 self.ui.debug('http error while sending %s command\n' % cmd)
333 329 self.ui.traceback()
334 330 raise IOError(None, inst)
335 331
336 332 # Insert error handlers for common I/O failures.
337 333 _wraphttpresponse(resp)
338 334
339 335 # record the url we got redirected to
340 336 resp_url = pycompat.bytesurl(resp.geturl())
341 337 if resp_url.endswith(qs):
342 338 resp_url = resp_url[:-len(qs)]
343 339 if self._url.rstrip('/') != resp_url.rstrip('/'):
344 340 if not self.ui.quiet:
345 341 self.ui.warn(_('real URL is %s\n') % resp_url)
346 342 self._url = resp_url
347 343 try:
348 344 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
349 345 except AttributeError:
350 346 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
351 347
352 348 safeurl = util.hidepassword(self._url)
353 349 if proto.startswith('application/hg-error'):
354 350 raise error.OutOfBandError(resp.read())
355 351 # accept old "text/plain" and "application/hg-changegroup" for now
356 352 if not (proto.startswith('application/mercurial-') or
357 353 (proto.startswith('text/plain')
358 354 and not resp.headers.get('content-length')) or
359 355 proto.startswith('application/hg-changegroup')):
360 356 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
361 357 raise error.RepoError(
362 358 _("'%s' does not appear to be an hg repository:\n"
363 359 "---%%<--- (%s)\n%s\n---%%<---\n")
364 360 % (safeurl, proto or 'no content-type', resp.read(1024)))
365 361
366 362 if proto.startswith('application/mercurial-'):
367 363 try:
368 364 version = proto.split('-', 1)[1]
369 365 version_info = tuple([int(n) for n in version.split('.')])
370 366 except ValueError:
371 367 raise error.RepoError(_("'%s' sent a broken Content-Type "
372 368 "header (%s)") % (safeurl, proto))
373 369
374 370 # TODO consider switching to a decompression reader that uses
375 371 # generators.
376 372 if version_info == (0, 1):
377 373 if _compressible:
378 374 return util.compengines['zlib'].decompressorreader(resp)
379 375 return resp
380 376 elif version_info == (0, 2):
381 377 # application/mercurial-0.2 always identifies the compression
382 378 # engine in the payload header.
383 379 elen = struct.unpack('B', resp.read(1))[0]
384 380 ename = resp.read(elen)
385 381 engine = util.compengines.forwiretype(ename)
386 382 return engine.decompressorreader(resp)
387 383 else:
388 384 raise error.RepoError(_("'%s' uses newer protocol %s") %
389 385 (safeurl, version))
390 386
391 387 if _compressible:
392 388 return util.compengines['zlib'].decompressorreader(resp)
393 389
394 390 return resp
395 391
396 392 def _call(self, cmd, **args):
397 393 fp = self._callstream(cmd, **args)
398 394 try:
399 395 return fp.read()
400 396 finally:
401 397 # if using keepalive, allow connection to be reused
402 398 fp.close()
403 399
404 400 def _callpush(self, cmd, cg, **args):
405 401 # have to stream bundle to a temp file because we do not have
406 402 # http 1.1 chunked transfer.
407 403
408 404 types = self.capable('unbundle')
409 405 try:
410 406 types = types.split(',')
411 407 except AttributeError:
412 408 # servers older than d1b16a746db6 will send 'unbundle' as a
413 409 # boolean capability. They only support headerless/uncompressed
414 410 # bundles.
415 411 types = [""]
416 412 for x in types:
417 413 if x in bundle2.bundletypes:
418 414 type = x
419 415 break
420 416
421 417 tempname = bundle2.writebundle(self.ui, cg, None, type)
422 418 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
423 419 headers = {r'Content-Type': r'application/mercurial-0.1'}
424 420
425 421 try:
426 422 r = self._call(cmd, data=fp, headers=headers, **args)
427 423 vals = r.split('\n', 1)
428 424 if len(vals) < 2:
429 425 raise error.ResponseError(_("unexpected response:"), r)
430 426 return vals
431 427 except urlerr.httperror:
432 428 # Catch and re-raise these so we don't try and treat them
433 429 # like generic socket errors. They lack any values in
434 430 # .args on Python 3 which breaks our socket.error block.
435 431 raise
436 432 except socket.error as err:
437 433 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
438 434 raise error.Abort(_('push failed: %s') % err.args[1])
439 435 raise error.Abort(err.args[1])
440 436 finally:
441 437 fp.close()
442 438 os.unlink(tempname)
443 439
444 440 def _calltwowaystream(self, cmd, fp, **args):
445 441 fh = None
446 442 fp_ = None
447 443 filename = None
448 444 try:
449 445 # dump bundle to disk
450 446 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
451 447 fh = os.fdopen(fd, r"wb")
452 448 d = fp.read(4096)
453 449 while d:
454 450 fh.write(d)
455 451 d = fp.read(4096)
456 452 fh.close()
457 453 # start http push
458 454 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
459 455 headers = {r'Content-Type': r'application/mercurial-0.1'}
460 456 return self._callstream(cmd, data=fp_, headers=headers, **args)
461 457 finally:
462 458 if fp_ is not None:
463 459 fp_.close()
464 460 if fh is not None:
465 461 fh.close()
466 462 os.unlink(filename)
467 463
468 464 def _callcompressable(self, cmd, **args):
469 465 return self._callstream(cmd, _compressible=True, **args)
470 466
471 467 def _abort(self, exception):
472 468 raise exception
473 469
474 470 def makepeer(ui, path):
475 471 u = util.url(path)
476 472 if u.query or u.fragment:
477 473 raise error.Abort(_('unsupported URL component: "%s"') %
478 474 (u.query or u.fragment))
479 475
480 476 # urllib cannot handle URLs with embedded user or passwd.
481 477 url, authinfo = u.authinfo()
482 478 ui.debug('using %s\n' % url)
483 479
484 480 opener = urlmod.opener(ui, authinfo)
485 481
486 482 return httppeer(ui, path, url, opener)
487 483
488 484 def instance(ui, path, create):
489 485 if create:
490 486 raise error.Abort(_('cannot create new http repository'))
491 487 try:
492 488 if path.startswith('https:') and not urlmod.has_https:
493 489 raise error.Abort(_('Python support for SSL and HTTPS '
494 490 'is not installed'))
495 491
496 492 inst = makepeer(ui, path)
497 493 inst._fetchcaps()
498 494
499 495 return inst
500 496 except error.RepoError as httpexception:
501 497 try:
502 498 r = statichttprepo.instance(ui, "static-" + path, create)
503 499 ui.note(_('(falling back to static-http)\n'))
504 500 return r
505 501 except error.RepoError:
506 502 raise httpexception # use the original http RepoError instead
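For context, instance() above is the entry point used for http(s) URLs; a hedged sketch of the call sequence it performs, assuming a configured ui object and a reachable server:

    peer = makepeer(ui, b'http://example.com/repo')
    peer._fetchcaps()               # handshake: populates peer._caps
    caps = peer.capabilities()      # safe only after the fetch above
    assert peer.peer() is peer and peer.local() is None

If the server does not speak the wire protocol, the RepoError fallback retries the path through statichttprepo, which can read repositories exported by a plain dumb HTTP server.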
mercurial/localrepo.py
@@ -1,2336 +1,2332 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 )
23 23 from .thirdparty.zope import (
24 24 interface as zi,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 peer,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 repository,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70 from .utils import (
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 release = lockmod.release
76 76 urlerr = util.urlerr
77 77 urlreq = util.urlreq
78 78
79 79 # set of (path, vfs-location) tuples. vfs-location is:
80 80 # - 'plain' for vfs relative paths
81 81 # - '' for svfs relative paths
82 82 _cachedfiles = set()
83 83
84 84 class _basefilecache(scmutil.filecache):
85 85 """All filecache usage on repo is done for logic that should be unfiltered
86 86 """
87 87 def __get__(self, repo, type=None):
88 88 if repo is None:
89 89 return self
90 90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 91 def __set__(self, repo, value):
92 92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 93 def __delete__(self, repo):
94 94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95 95
96 96 class repofilecache(_basefilecache):
97 97 """filecache for files in .hg but outside of .hg/store"""
98 98 def __init__(self, *paths):
99 99 super(repofilecache, self).__init__(*paths)
100 100 for path in paths:
101 101 _cachedfiles.add((path, 'plain'))
102 102
103 103 def join(self, obj, fname):
104 104 return obj.vfs.join(fname)
105 105
106 106 class storecache(_basefilecache):
107 107 """filecache for files in the store"""
108 108 def __init__(self, *paths):
109 109 super(storecache, self).__init__(*paths)
110 110 for path in paths:
111 111 _cachedfiles.add((path, ''))
112 112
113 113 def join(self, obj, fname):
114 114 return obj.sjoin(fname)
115 115
116 116 def isfilecached(repo, name):
117 117 """check if a repo has already cached the "name" filecache-ed property
118 118
119 119 This returns a (cachedobj-or-None, iscached) tuple.
120 120 """
121 121 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 122 if not cacheentry:
123 123 return None, False
124 124 return cacheentry.obj, True
125 125
126 126 class unfilteredpropertycache(util.propertycache):
127 127 """propertycache that applies to the unfiltered repo only"""
128 128
129 129 def __get__(self, repo, type=None):
130 130 unfi = repo.unfiltered()
131 131 if unfi is repo:
132 132 return super(unfilteredpropertycache, self).__get__(unfi)
133 133 return getattr(unfi, self.name)
134 134
135 135 class filteredpropertycache(util.propertycache):
136 136 """propertycache that must take filtering into account"""
137 137
138 138 def cachevalue(self, obj, value):
139 139 object.__setattr__(obj, self.name, value)
140 140
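Both cache classes above specialize util.propertycache; a minimal standalone sketch of that pattern, i.e. a non-data descriptor that computes once and then plants the value on the instance so later reads bypass the descriptor entirely:

    class propcache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, type=None):
            if obj is None:
                return self
            value = self.func(obj)
            # the instance attribute now shadows this non-data descriptor
            obj.__dict__[self.name] = value
            return value

filteredpropertycache above customizes only the storing step (cachevalue), while unfilteredpropertycache redirects the computation to the unfiltered repo.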
141 141
142 142 def hasunfilteredcache(repo, name):
143 143 """check if a repo has an unfilteredpropertycache value for <name>"""
144 144 return name in vars(repo.unfiltered())
145 145
146 146 def unfilteredmethod(orig):
147 147 """decorate a method that always needs to be run on the unfiltered version"""
148 148 def wrapper(repo, *args, **kwargs):
149 149 return orig(repo.unfiltered(), *args, **kwargs)
150 150 return wrapper
151 151
152 152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 153 'unbundle'}
154 154 legacycaps = moderncaps.union({'changegroupsubset'})
155 155
156 156 class localpeer(repository.peer):
157 157 '''peer for a local repo; reflects only the most recent API'''
158 158
159 159 def __init__(self, repo, caps=None):
160 160 super(localpeer, self).__init__()
161 161
162 162 if caps is None:
163 163 caps = moderncaps.copy()
164 164 self._repo = repo.filtered('served')
165 self._ui = repo.ui
165 self.ui = repo.ui
166 166 self._caps = repo._restrictcapabilities(caps)
167 167
168 168 # Begin of _basepeer interface.
169 169
170 @util.propertycache
171 def ui(self):
172 return self._ui
173
174 170 def url(self):
175 171 return self._repo.url()
176 172
177 173 def local(self):
178 174 return self._repo
179 175
180 176 def peer(self):
181 177 return self
182 178
183 179 def canpush(self):
184 180 return True
185 181
186 182 def close(self):
187 183 self._repo.close()
188 184
189 185 # End of _basepeer interface.
190 186
191 187 # Begin of _basewirecommands interface.
192 188
193 189 def branchmap(self):
194 190 return self._repo.branchmap()
195 191
196 192 def capabilities(self):
197 193 return self._caps
198 194
199 195 def debugwireargs(self, one, two, three=None, four=None, five=None):
200 196 """Used to test argument passing over the wire"""
201 197 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
202 198 pycompat.bytestr(four),
203 199 pycompat.bytestr(five))
204 200
205 201 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
206 202 **kwargs):
207 203 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
208 204 common=common, bundlecaps=bundlecaps,
209 205 **kwargs)[1]
210 206 cb = util.chunkbuffer(chunks)
211 207
212 208 if exchange.bundle2requested(bundlecaps):
213 209 # When requesting a bundle2, getbundle returns a stream to make the
214 210 # wire level function happier. We need to build a proper object
215 211 # from it in local peer.
216 212 return bundle2.getunbundler(self.ui, cb)
217 213 else:
218 214 return changegroup.getunbundler('01', cb, None)
219 215
220 216 def heads(self):
221 217 return self._repo.heads()
222 218
223 219 def known(self, nodes):
224 220 return self._repo.known(nodes)
225 221
226 222 def listkeys(self, namespace):
227 223 return self._repo.listkeys(namespace)
228 224
229 225 def lookup(self, key):
230 226 return self._repo.lookup(key)
231 227
232 228 def pushkey(self, namespace, key, old, new):
233 229 return self._repo.pushkey(namespace, key, old, new)
234 230
235 231 def stream_out(self):
236 232 raise error.Abort(_('cannot perform stream clone against local '
237 233 'peer'))
238 234
239 235 def unbundle(self, cg, heads, url):
240 236 """apply a bundle on a repo
241 237
242 238 This function handles the repo locking itself."""
243 239 try:
244 240 try:
245 241 cg = exchange.readbundle(self.ui, cg, None)
246 242 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
247 243 if util.safehasattr(ret, 'getchunks'):
248 244 # This is a bundle20 object, turn it into an unbundler.
249 245 # This little dance should be dropped eventually when the
250 246 # API is finally improved.
251 247 stream = util.chunkbuffer(ret.getchunks())
252 248 ret = bundle2.getunbundler(self.ui, stream)
253 249 return ret
254 250 except Exception as exc:
255 251 # If the exception contains output salvaged from a bundle2
256 252 # reply, we need to make sure it is printed before continuing
257 253 # to fail. So we build a bundle2 with such output and consume
258 254 # it directly.
259 255 #
260 256 # This is not very elegant but allows a "simple" solution for
261 257 # issue4594
262 258 output = getattr(exc, '_bundle2salvagedoutput', ())
263 259 if output:
264 260 bundler = bundle2.bundle20(self._repo.ui)
265 261 for out in output:
266 262 bundler.addpart(out)
267 263 stream = util.chunkbuffer(bundler.getchunks())
268 264 b = bundle2.getunbundler(self.ui, stream)
269 265 bundle2.processbundle(self._repo, b)
270 266 raise
271 267 except error.PushRaced as exc:
272 268 raise error.ResponseError(_('push failed:'),
273 269 stringutil.forcebytestr(exc))
274 270
275 271 # End of _basewirecommands interface.
276 272
277 273 # Begin of peer interface.
278 274
279 275 def iterbatch(self):
280 276 return peer.localiterbatcher(self)
281 277
282 278 # End of peer interface.
283 279
284 280 class locallegacypeer(repository.legacypeer, localpeer):
285 281 '''peer extension which implements legacy methods too; used for tests with
286 282 restricted capabilities'''
287 283
288 284 def __init__(self, repo):
289 285 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
290 286
291 287 # Begin of baselegacywirecommands interface.
292 288
293 289 def between(self, pairs):
294 290 return self._repo.between(pairs)
295 291
296 292 def branches(self, nodes):
297 293 return self._repo.branches(nodes)
298 294
299 295 def changegroup(self, basenodes, source):
300 296 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
301 297 missingheads=self._repo.heads())
302 298 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
303 299
304 300 def changegroupsubset(self, bases, heads, source):
305 301 outgoing = discovery.outgoing(self._repo, missingroots=bases,
306 302 missingheads=heads)
307 303 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
308 304
309 305 # End of baselegacywirecommands interface.
310 306
311 307 # Increment the sub-version when the revlog v2 format changes to lock out old
312 308 # clients.
313 309 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
314 310
315 311 # Functions receiving (ui, features) that extensions can register to impact
316 312 # the ability to load repositories with custom requirements. Only
317 313 # functions defined in loaded extensions are called.
318 314 #
319 315 # The function receives a set of requirement strings that the repository
320 316 # is capable of opening. Functions will typically add elements to the
321 317 # set to reflect that the extension knows how to handle those requirements.
322 318 featuresetupfuncs = set()
323 319
324 320 @zi.implementer(repository.completelocalrepository)
325 321 class localrepository(object):
326 322
327 323 # obsolete experimental requirements:
328 324 # - manifestv2: An experimental new manifest format that allowed
329 325 # for stem compression of long paths. Experiment ended up not
330 326 # being successful (repository sizes went up due to worse delta
331 327 # chains), and the code was deleted in 4.6.
332 328 supportedformats = {
333 329 'revlogv1',
334 330 'generaldelta',
335 331 'treemanifest',
336 332 REVLOGV2_REQUIREMENT,
337 333 }
338 334 _basesupported = supportedformats | {
339 335 'store',
340 336 'fncache',
341 337 'shared',
342 338 'relshared',
343 339 'dotencode',
344 340 'exp-sparse',
345 341 }
346 342 openerreqs = {
347 343 'revlogv1',
348 344 'generaldelta',
349 345 'treemanifest',
350 346 }
351 347
352 348 # list of prefix for file which can be written without 'wlock'
353 349 # Extensions should extend this list when needed
354 350 _wlockfreeprefix = {
355 351 # We might consider requiring 'wlock' for the next
356 352 # two, but pretty much all the existing code assumes
357 353 # wlock is not needed, so we keep them excluded for
358 354 # now.
359 355 'hgrc',
360 356 'requires',
361 357 # XXX cache is a complicated business; someone
362 358 # should investigate this in depth at some point
363 359 'cache/',
364 360 # XXX shouldn't be dirstate covered by the wlock?
365 361 'dirstate',
366 362 # XXX bisect was still a bit too messy at the time
367 363 # this changeset was introduced. Someone should fix
368 364 # the remaining bit and drop this line
369 365 'bisect.state',
370 366 }
371 367
372 368 def __init__(self, baseui, path, create=False):
373 369 self.requirements = set()
374 370 self.filtername = None
375 371 # wvfs: rooted at the repository root, used to access the working copy
376 372 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
377 373 # vfs: rooted at .hg, used to access repo files outside of .hg/store
378 374 self.vfs = None
379 375 # svfs: usually rooted at .hg/store, used to access repository history
380 376 # If this is a shared repository, this vfs may point to another
381 377 # repository's .hg/store directory.
382 378 self.svfs = None
383 379 self.root = self.wvfs.base
384 380 self.path = self.wvfs.join(".hg")
385 381 self.origroot = path
386 382 # This is only used by context.workingctx.match in order to
387 383 # detect files in subrepos.
388 384 self.auditor = pathutil.pathauditor(
389 385 self.root, callback=self._checknested)
390 386 # This is only used by context.basectx.match in order to detect
391 387 # files in subrepos.
392 388 self.nofsauditor = pathutil.pathauditor(
393 389 self.root, callback=self._checknested, realfs=False, cached=True)
394 390 self.baseui = baseui
395 391 self.ui = baseui.copy()
396 392 self.ui.copy = baseui.copy # prevent copying repo configuration
397 393 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
398 394 if (self.ui.configbool('devel', 'all-warnings') or
399 395 self.ui.configbool('devel', 'check-locks')):
400 396 self.vfs.audit = self._getvfsward(self.vfs.audit)
401 397 # A list of callbacks to shape the phase if no data were found.
402 398 # Callbacks are in the form: func(repo, roots) --> processed root.
403 399 # This list is to be filled by extensions during repo setup
404 400 self._phasedefaults = []
405 401 try:
406 402 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
407 403 self._loadextensions()
408 404 except IOError:
409 405 pass
410 406
411 407 if featuresetupfuncs:
412 408 self.supported = set(self._basesupported) # use private copy
413 409 extmods = set(m.__name__ for n, m
414 410 in extensions.extensions(self.ui))
415 411 for setupfunc in featuresetupfuncs:
416 412 if setupfunc.__module__ in extmods:
417 413 setupfunc(self.ui, self.supported)
418 414 else:
419 415 self.supported = self._basesupported
420 416 color.setup(self.ui)
421 417
422 418 # Add compression engines.
423 419 for name in util.compengines:
424 420 engine = util.compengines[name]
425 421 if engine.revlogheader():
426 422 self.supported.add('exp-compression-%s' % name)
427 423
428 424 if not self.vfs.isdir():
429 425 if create:
430 426 self.requirements = newreporequirements(self)
431 427
432 428 if not self.wvfs.exists():
433 429 self.wvfs.makedirs()
434 430 self.vfs.makedir(notindexed=True)
435 431
436 432 if 'store' in self.requirements:
437 433 self.vfs.mkdir("store")
438 434
439 435 # create an invalid changelog
440 436 self.vfs.append(
441 437 "00changelog.i",
442 438 '\0\0\0\2' # represents revlogv2
443 439 ' dummy changelog to prevent using the old repo layout'
444 440 )
445 441 else:
446 442 raise error.RepoError(_("repository %s not found") % path)
447 443 elif create:
448 444 raise error.RepoError(_("repository %s already exists") % path)
449 445 else:
450 446 try:
451 447 self.requirements = scmutil.readrequires(
452 448 self.vfs, self.supported)
453 449 except IOError as inst:
454 450 if inst.errno != errno.ENOENT:
455 451 raise
456 452
457 453 cachepath = self.vfs.join('cache')
458 454 self.sharedpath = self.path
459 455 try:
460 456 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
461 457 if 'relshared' in self.requirements:
462 458 sharedpath = self.vfs.join(sharedpath)
463 459 vfs = vfsmod.vfs(sharedpath, realpath=True)
464 460 cachepath = vfs.join('cache')
465 461 s = vfs.base
466 462 if not vfs.exists():
467 463 raise error.RepoError(
468 464 _('.hg/sharedpath points to nonexistent directory %s') % s)
469 465 self.sharedpath = s
470 466 except IOError as inst:
471 467 if inst.errno != errno.ENOENT:
472 468 raise
473 469
474 470 if 'exp-sparse' in self.requirements and not sparse.enabled:
475 471 raise error.RepoError(_('repository is using sparse feature but '
476 472 'sparse is not enabled; enable the '
477 473 '"sparse" extensions to access'))
478 474
479 475 self.store = store.store(
480 476 self.requirements, self.sharedpath,
481 477 lambda base: vfsmod.vfs(base, cacheaudited=True))
482 478 self.spath = self.store.path
483 479 self.svfs = self.store.vfs
484 480 self.sjoin = self.store.join
485 481 self.vfs.createmode = self.store.createmode
486 482 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
487 483 self.cachevfs.createmode = self.store.createmode
488 484 if (self.ui.configbool('devel', 'all-warnings') or
489 485 self.ui.configbool('devel', 'check-locks')):
490 486 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
491 487 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
492 488 else: # standard vfs
493 489 self.svfs.audit = self._getsvfsward(self.svfs.audit)
494 490 self._applyopenerreqs()
495 491 if create:
496 492 self._writerequirements()
497 493
498 494 self._dirstatevalidatewarned = False
499 495
500 496 self._branchcaches = {}
501 497 self._revbranchcache = None
502 498 self._filterpats = {}
503 499 self._datafilters = {}
504 500 self._transref = self._lockref = self._wlockref = None
505 501
506 502 # A cache for various files under .hg/ that tracks file changes,
507 503 # (used by the filecache decorator)
508 504 #
509 505 # Maps a property name to its util.filecacheentry
510 506 self._filecache = {}
511 507
512 508 # hold sets of revision to be filtered
513 509 # should be cleared when something might have changed the filter value:
514 510 # - new changesets,
515 511 # - phase change,
516 512 # - new obsolescence marker,
517 513 # - working directory parent change,
518 514 # - bookmark changes
519 515 self.filteredrevcache = {}
520 516
521 517 # post-dirstate-status hooks
522 518 self._postdsstatus = []
523 519
524 520 # generic mapping between names and nodes
525 521 self.names = namespaces.namespaces()
526 522
527 523 # Key to signature value.
528 524 self._sparsesignaturecache = {}
529 525 # Signature to cached matcher instance.
530 526 self._sparsematchercache = {}
531 527
532 528 def _getvfsward(self, origfunc):
533 529 """build a ward for self.vfs"""
534 530 rref = weakref.ref(self)
535 531 def checkvfs(path, mode=None):
536 532 ret = origfunc(path, mode=mode)
537 533 repo = rref()
538 534 if (repo is None
539 535 or not util.safehasattr(repo, '_wlockref')
540 536 or not util.safehasattr(repo, '_lockref')):
541 537 return
542 538 if mode in (None, 'r', 'rb'):
543 539 return
544 540 if path.startswith(repo.path):
545 541 # truncate name relative to the repository (.hg)
546 542 path = path[len(repo.path) + 1:]
547 543 if path.startswith('cache/'):
548 544 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
549 545 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
550 546 if path.startswith('journal.'):
551 547 # journal is covered by 'lock'
552 548 if repo._currentlock(repo._lockref) is None:
553 549 repo.ui.develwarn('write with no lock: "%s"' % path,
554 550 stacklevel=2, config='check-locks')
555 551 elif repo._currentlock(repo._wlockref) is None:
556 552 # rest of vfs files are covered by 'wlock'
557 553 #
558 554 # exclude special files
559 555 for prefix in self._wlockfreeprefix:
560 556 if path.startswith(prefix):
561 557 return
562 558 repo.ui.develwarn('write with no wlock: "%s"' % path,
563 559 stacklevel=2, config='check-locks')
564 560 return ret
565 561 return checkvfs
566 562
567 563 def _getsvfsward(self, origfunc):
568 564 """build a ward for self.svfs"""
569 565 rref = weakref.ref(self)
570 566 def checksvfs(path, mode=None):
571 567 ret = origfunc(path, mode=mode)
572 568 repo = rref()
573 569 if repo is None or not util.safehasattr(repo, '_lockref'):
574 570 return
575 571 if mode in (None, 'r', 'rb'):
576 572 return
577 573 if path.startswith(repo.sharedpath):
578 574 # truncate name relative to the repository (.hg)
579 575 path = path[len(repo.sharedpath) + 1:]
580 576 if repo._currentlock(repo._lockref) is None:
581 577 repo.ui.develwarn('write with no lock: "%s"' % path,
582 578 stacklevel=3)
583 579 return ret
584 580 return checksvfs
585 581
586 582 def close(self):
587 583 self._writecaches()
588 584
589 585 def _loadextensions(self):
590 586 extensions.loadall(self.ui)
591 587
592 588 def _writecaches(self):
593 589 if self._revbranchcache:
594 590 self._revbranchcache.write()
595 591
596 592 def _restrictcapabilities(self, caps):
597 593 if self.ui.configbool('experimental', 'bundle2-advertise'):
598 594 caps = set(caps)
599 595 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
600 596 role='client'))
601 597 caps.add('bundle2=' + urlreq.quote(capsblob))
602 598 return caps
603 599
604 600 def _applyopenerreqs(self):
605 601 self.svfs.options = dict((r, 1) for r in self.requirements
606 602 if r in self.openerreqs)
607 603 # experimental config: format.chunkcachesize
608 604 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
609 605 if chunkcachesize is not None:
610 606 self.svfs.options['chunkcachesize'] = chunkcachesize
611 607 # experimental config: format.maxchainlen
612 608 maxchainlen = self.ui.configint('format', 'maxchainlen')
613 609 if maxchainlen is not None:
614 610 self.svfs.options['maxchainlen'] = maxchainlen
615 611 # experimental config: format.manifestcachesize
616 612 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
617 613 if manifestcachesize is not None:
618 614 self.svfs.options['manifestcachesize'] = manifestcachesize
619 615 # experimental config: format.aggressivemergedeltas
620 616 aggressivemergedeltas = self.ui.configbool('format',
621 617 'aggressivemergedeltas')
622 618 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
623 619 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
624 620 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
625 621 if 0 <= chainspan:
626 622 self.svfs.options['maxdeltachainspan'] = chainspan
627 623 mmapindexthreshold = self.ui.configbytes('experimental',
628 624 'mmapindexthreshold')
629 625 if mmapindexthreshold is not None:
630 626 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
631 627 withsparseread = self.ui.configbool('experimental', 'sparse-read')
632 628 srdensitythres = float(self.ui.config('experimental',
633 629 'sparse-read.density-threshold'))
634 630 srmingapsize = self.ui.configbytes('experimental',
635 631 'sparse-read.min-gap-size')
636 632 self.svfs.options['with-sparse-read'] = withsparseread
637 633 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
638 634 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
639 635
640 636 for r in self.requirements:
641 637 if r.startswith('exp-compression-'):
642 638 self.svfs.options['compengine'] = r[len('exp-compression-'):]
643 639
644 640 # TODO move "revlogv2" to openerreqs once finalized.
645 641 if REVLOGV2_REQUIREMENT in self.requirements:
646 642 self.svfs.options['revlogv2'] = True
647 643
648 644 def _writerequirements(self):
649 645 scmutil.writerequires(self.vfs, self.requirements)
650 646
651 647 def _checknested(self, path):
652 648 """Determine if path is a legal nested repository."""
653 649 if not path.startswith(self.root):
654 650 return False
655 651 subpath = path[len(self.root) + 1:]
656 652 normsubpath = util.pconvert(subpath)
657 653
658 654 # XXX: Checking against the current working copy is wrong in
659 655 # the sense that it can reject things like
660 656 #
661 657 # $ hg cat -r 10 sub/x.txt
662 658 #
663 659 # if sub/ is no longer a subrepository in the working copy
664 660 # parent revision.
665 661 #
666 662 # However, it can of course also allow things that would have
667 663 # been rejected before, such as the above cat command if sub/
668 664 # is a subrepository now, but was a normal directory before.
669 665 # The old path auditor would have rejected by mistake since it
670 666 # panics when it sees sub/.hg/.
671 667 #
672 668 # All in all, checking against the working copy seems sensible
673 669 # since we want to prevent access to nested repositories on
674 670 # the filesystem *now*.
675 671 ctx = self[None]
676 672 parts = util.splitpath(subpath)
677 673 while parts:
678 674 prefix = '/'.join(parts)
679 675 if prefix in ctx.substate:
680 676 if prefix == normsubpath:
681 677 return True
682 678 else:
683 679 sub = ctx.sub(prefix)
684 680 return sub.checknested(subpath[len(prefix) + 1:])
685 681 else:
686 682 parts.pop()
687 683 return False
688 684
689 685 def peer(self):
690 686 return localpeer(self) # not cached to avoid reference cycle
691 687
692 688 def unfiltered(self):
693 689 """Return unfiltered version of the repository
694 690
695 691 Intended to be overwritten by filtered repo."""
696 692 return self
697 693
698 694 def filtered(self, name, visibilityexceptions=None):
699 695 """Return a filtered version of a repository"""
700 696 cls = repoview.newtype(self.unfiltered().__class__)
701 697 return cls(self, name, visibilityexceptions)
702 698
703 699 @repofilecache('bookmarks', 'bookmarks.current')
704 700 def _bookmarks(self):
705 701 return bookmarks.bmstore(self)
706 702
707 703 @property
708 704 def _activebookmark(self):
709 705 return self._bookmarks.active
710 706
711 707 # _phasesets depend on the changelog. What we need is to call
712 708 # _phasecache.invalidate() if '00changelog.i' was changed, but it
713 709 # can't be easily expressed in the filecache mechanism.
714 710 @storecache('phaseroots', '00changelog.i')
715 711 def _phasecache(self):
716 712 return phases.phasecache(self, self._phasedefaults)
717 713
718 714 @storecache('obsstore')
719 715 def obsstore(self):
720 716 return obsolete.makestore(self.ui, self)
721 717
722 718 @storecache('00changelog.i')
723 719 def changelog(self):
724 720 return changelog.changelog(self.svfs,
725 721 trypending=txnutil.mayhavepending(self.root))
726 722
727 723 def _constructmanifest(self):
728 724 # This is a temporary function while we migrate from manifest to
729 725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
730 726 # manifest creation.
731 727 return manifest.manifestrevlog(self.svfs)
732 728
733 729 @storecache('00manifest.i')
734 730 def manifestlog(self):
735 731 return manifest.manifestlog(self.svfs, self)
736 732
737 733 @repofilecache('dirstate')
738 734 def dirstate(self):
739 735 sparsematchfn = lambda: sparse.matcher(self)
740 736
741 737 return dirstate.dirstate(self.vfs, self.ui, self.root,
742 738 self._dirstatevalidate, sparsematchfn)
743 739
744 740 def _dirstatevalidate(self, node):
745 741 try:
746 742 self.changelog.rev(node)
747 743 return node
748 744 except error.LookupError:
749 745 if not self._dirstatevalidatewarned:
750 746 self._dirstatevalidatewarned = True
751 747 self.ui.warn(_("warning: ignoring unknown"
752 748 " working parent %s!\n") % short(node))
753 749 return nullid
754 750
755 751 @repofilecache(narrowspec.FILENAME)
756 752 def narrowpats(self):
757 753 """matcher patterns for this repository's narrowspec
758 754
759 755 A tuple of (includes, excludes).
760 756 """
761 757 source = self
762 758 if self.shared():
763 759 from . import hg
764 760 source = hg.sharedreposource(self)
765 761 return narrowspec.load(source)
766 762
767 763 @repofilecache(narrowspec.FILENAME)
768 764 def _narrowmatch(self):
769 765 if changegroup.NARROW_REQUIREMENT not in self.requirements:
770 766 return matchmod.always(self.root, '')
771 767 include, exclude = self.narrowpats
772 768 return narrowspec.match(self.root, include=include, exclude=exclude)
773 769
774 770 # TODO(martinvonz): make this property-like instead?
775 771 def narrowmatch(self):
776 772 return self._narrowmatch
777 773
778 774 def setnarrowpats(self, newincludes, newexcludes):
779 775 target = self
780 776 if self.shared():
781 777 from . import hg
782 778 target = hg.sharedreposource(self)
783 779 narrowspec.save(target, newincludes, newexcludes)
784 780 self.invalidate(clearfilecache=True)
785 781
786 782 def __getitem__(self, changeid):
787 783 if changeid is None:
788 784 return context.workingctx(self)
789 785 if isinstance(changeid, context.basectx):
790 786 return changeid
791 787 if isinstance(changeid, slice):
792 788 # wdirrev isn't contiguous so the slice shouldn't include it
793 789 return [context.changectx(self, i)
794 790 for i in xrange(*changeid.indices(len(self)))
795 791 if i not in self.changelog.filteredrevs]
796 792 try:
797 793 return context.changectx(self, changeid)
798 794 except error.WdirUnsupported:
799 795 return context.workingctx(self)
800 796
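Hedged usage notes for the lookup above, assuming an open repo object:

    repo[None]      # workingctx for the working directory
    repo[b'tip']    # changectx for anything the changelog can resolve
    repo[0:5]       # list of changectx objects, filtered revisions skipped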
801 797 def __contains__(self, changeid):
802 798 """True if the given changeid exists
803 799
804 800 error.LookupError is raised if an ambiguous node is specified.
805 801 """
806 802 try:
807 803 self[changeid]
808 804 return True
809 805 except error.RepoLookupError:
810 806 return False
811 807
812 808 def __nonzero__(self):
813 809 return True
814 810
815 811 __bool__ = __nonzero__
816 812
817 813 def __len__(self):
818 814 # no need to pay the cost of repoview.changelog
819 815 unfi = self.unfiltered()
820 816 return len(unfi.changelog)
821 817
822 818 def __iter__(self):
823 819 return iter(self.changelog)
824 820
825 821 def revs(self, expr, *args):
826 822 '''Find revisions matching a revset.
827 823
828 824 The revset is specified as a string ``expr`` that may contain
829 825 %-formatting to escape certain types. See ``revsetlang.formatspec``.
830 826
831 827 Revset aliases from the configuration are not expanded. To expand
832 828 user aliases, consider calling ``scmutil.revrange()`` or
833 829 ``repo.anyrevs([expr], user=True)``.
834 830
835 831 Returns a revset.abstractsmartset, which is a list-like interface
836 832 that contains integer revisions.
837 833 '''
838 834 expr = revsetlang.formatspec(expr, *args)
839 835 m = revset.match(None, expr)
840 836 return m(self)
841 837
842 838 def set(self, expr, *args):
843 839 '''Find revisions matching a revset and emit changectx instances.
844 840
845 841 This is a convenience wrapper around ``revs()`` that iterates the
846 842 result and is a generator of changectx instances.
847 843
848 844 Revset aliases from the configuration are not expanded. To expand
849 845 user aliases, consider calling ``scmutil.revrange()``.
850 846 '''
851 847 for r in self.revs(expr, *args):
852 848 yield self[r]
853 849
854 850 def anyrevs(self, specs, user=False, localalias=None):
855 851 '''Find revisions matching one of the given revsets.
856 852
857 853 Revset aliases from the configuration are not expanded by default. To
858 854 expand user aliases, specify ``user=True``. To provide some local
859 855 definitions overriding user aliases, set ``localalias`` to
860 856 ``{name: definitionstring}``.
861 857 '''
862 858 if user:
863 859 m = revset.matchany(self.ui, specs, repo=self,
864 860 localalias=localalias)
865 861 else:
866 862 m = revset.matchany(None, specs, localalias=localalias)
867 863 return m(self)
868 864
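Hedged examples of the three revset helpers above, assuming an open repo:

    repo.revs(b'heads(%ld)', [1, 2, 3])       # %ld escapes a list of revs
    [ctx.hex() for ctx in repo.set(b'draft()')]
    repo.anyrevs([b'tip', b'.^'], user=True)  # expands user revset aliases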
869 865 def url(self):
870 866 return 'file:' + self.root
871 867
872 868 def hook(self, name, throw=False, **args):
873 869 """Call a hook, passing this repo instance.
874 870
875 871 This is a convenience method to aid invoking hooks. Extensions likely
876 872 won't call this unless they have registered a custom hook or are
877 873 replacing code that is expected to call a hook.
878 874 """
879 875 return hook.hook(self.ui, self, name, throw, **args)
880 876
881 877 @filteredpropertycache
882 878 def _tagscache(self):
883 879 '''Returns a tagscache object that contains various tags related
884 880 caches.'''
885 881
886 882 # This simplifies its cache management by having one decorated
887 883 # function (this one) and the rest simply fetch things from it.
888 884 class tagscache(object):
889 885 def __init__(self):
890 886 # These two define the set of tags for this repository. tags
891 887 # maps tag name to node; tagtypes maps tag name to 'global' or
892 888 # 'local'. (Global tags are defined by .hgtags across all
893 889 # heads, and local tags are defined in .hg/localtags.)
894 890 # They constitute the in-memory cache of tags.
895 891 self.tags = self.tagtypes = None
896 892
897 893 self.nodetagscache = self.tagslist = None
898 894
899 895 cache = tagscache()
900 896 cache.tags, cache.tagtypes = self._findtags()
901 897
902 898 return cache
903 899
904 900 def tags(self):
905 901 '''return a mapping of tag to node'''
906 902 t = {}
907 903 if self.changelog.filteredrevs:
908 904 tags, tt = self._findtags()
909 905 else:
910 906 tags = self._tagscache.tags
911 907 for k, v in tags.iteritems():
912 908 try:
913 909 # ignore tags to unknown nodes
914 910 self.changelog.rev(v)
915 911 t[k] = v
916 912 except (error.LookupError, ValueError):
917 913 pass
918 914 return t
919 915
920 916 def _findtags(self):
921 917 '''Do the hard work of finding tags. Return a pair of dicts
922 918 (tags, tagtypes) where tags maps tag name to node, and tagtypes
923 919 maps tag name to a string like \'global\' or \'local\'.
924 920 Subclasses or extensions are free to add their own tags, but
925 921 should be aware that the returned dicts will be retained for the
926 922 duration of the localrepo object.'''
927 923
928 924 # XXX what tagtype should subclasses/extensions use? Currently
929 925 # mq and bookmarks add tags, but do not set the tagtype at all.
930 926 # Should each extension invent its own tag type? Should there
931 927 # be one tagtype for all such "virtual" tags? Or is the status
932 928 # quo fine?
933 929
934 930
935 931 # map tag name to (node, hist)
936 932 alltags = tagsmod.findglobaltags(self.ui, self)
937 933 # map tag name to tag type
938 934 tagtypes = dict((tag, 'global') for tag in alltags)
939 935
940 936 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
941 937
942 938 # Build the return dicts. Have to re-encode tag names because
943 939 # the tags module always uses UTF-8 (in order not to lose info
944 940 # writing to the cache), but the rest of Mercurial wants them in
945 941 # local encoding.
946 942 tags = {}
947 943 for (name, (node, hist)) in alltags.iteritems():
948 944 if node != nullid:
949 945 tags[encoding.tolocal(name)] = node
950 946 tags['tip'] = self.changelog.tip()
951 947 tagtypes = dict([(encoding.tolocal(name), value)
952 948 for (name, value) in tagtypes.iteritems()])
953 949 return (tags, tagtypes)
954 950
955 951 def tagtype(self, tagname):
956 952 '''
957 953 return the type of the given tag. result can be:
958 954
959 955 'local' : a local tag
960 956 'global' : a global tag
961 957 None : tag does not exist
962 958 '''
963 959
964 960 return self._tagscache.tagtypes.get(tagname)
965 961
966 962 def tagslist(self):
967 963 '''return a list of tags ordered by revision'''
968 964 if not self._tagscache.tagslist:
969 965 l = []
970 966 for t, n in self.tags().iteritems():
971 967 l.append((self.changelog.rev(n), t, n))
972 968 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
973 969
974 970 return self._tagscache.tagslist
975 971
976 972 def nodetags(self, node):
977 973 '''return the tags associated with a node'''
978 974 if not self._tagscache.nodetagscache:
979 975 nodetagscache = {}
980 976 for t, n in self._tagscache.tags.iteritems():
981 977 nodetagscache.setdefault(n, []).append(t)
982 978 for tags in nodetagscache.itervalues():
983 979 tags.sort()
984 980 self._tagscache.nodetagscache = nodetagscache
985 981 return self._tagscache.nodetagscache.get(node, [])
986 982
987 983 def nodebookmarks(self, node):
988 984 """return the list of bookmarks pointing to the specified node"""
989 985 marks = []
990 986 for bookmark, n in self._bookmarks.iteritems():
991 987 if n == node:
992 988 marks.append(bookmark)
993 989 return sorted(marks)
994 990
995 991 def branchmap(self):
996 992 '''returns a dictionary {branch: [branchheads]} with branchheads
997 993 ordered by increasing revision number'''
998 994 branchmap.updatecache(self)
999 995 return self._branchcaches[self.filtername]
1000 996
1001 997 @unfilteredmethod
1002 998 def revbranchcache(self):
1003 999 if not self._revbranchcache:
1004 1000 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1005 1001 return self._revbranchcache
1006 1002
1007 1003 def branchtip(self, branch, ignoremissing=False):
1008 1004 '''return the tip node for a given branch
1009 1005
1010 1006 If ignoremissing is True, then this method will not raise an error.
1011 1007 This is helpful for callers that only expect None for a missing branch
1012 1008 (e.g. namespace).
1013 1009
1014 1010 '''
1015 1011 try:
1016 1012 return self.branchmap().branchtip(branch)
1017 1013 except KeyError:
1018 1014 if not ignoremissing:
1019 1015 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1020 1016 else:
1021 1017 pass
1022 1018
1023 1019 def lookup(self, key):
1024 1020 return scmutil.revsymbol(self, key).node()
1025 1021
1026 1022 def lookupbranch(self, key, remote=None):
1027 1023 repo = remote or self
1028 1024 if key in repo.branchmap():
1029 1025 return key
1030 1026
1031 1027 repo = (remote and remote.local()) and remote or self
1032 1028 return repo[key].branch()
1033 1029
1034 1030 def known(self, nodes):
1035 1031 cl = self.changelog
1036 1032 nm = cl.nodemap
1037 1033 filtered = cl.filteredrevs
1038 1034 result = []
1039 1035 for n in nodes:
1040 1036 r = nm.get(n)
1041 1037 resp = not (r is None or r in filtered)
1042 1038 result.append(resp)
1043 1039 return result
1044 1040
1045 1041 def local(self):
1046 1042 return self
1047 1043
1048 1044 def publishing(self):
1049 1045 # it's safe (and desirable) to trust the publish flag unconditionally
1050 1046 # so that we don't finalize changes shared between users via ssh or nfs
1051 1047 return self.ui.configbool('phases', 'publish', untrusted=True)
1052 1048
1053 1049 def cancopy(self):
1054 1050 # so statichttprepo's override of local() works
1055 1051 if not self.local():
1056 1052 return False
1057 1053 if not self.publishing():
1058 1054 return True
1059 1055 # if publishing we can't copy if there is filtered content
1060 1056 return not self.filtered('visible').changelog.filteredrevs
1061 1057
1062 1058 def shared(self):
1063 1059 '''the type of shared repository (None if not shared)'''
1064 1060 if self.sharedpath != self.path:
1065 1061 return 'store'
1066 1062 return None
1067 1063
1068 1064 def wjoin(self, f, *insidef):
1069 1065 return self.vfs.reljoin(self.root, f, *insidef)
1070 1066
1071 1067 def file(self, f):
1072 1068 if f[0] == '/':
1073 1069 f = f[1:]
1074 1070 return filelog.filelog(self.svfs, f)
1075 1071
1076 1072 def setparents(self, p1, p2=nullid):
1077 1073 with self.dirstate.parentchange():
1078 1074 copies = self.dirstate.setparents(p1, p2)
1079 1075 pctx = self[p1]
1080 1076 if copies:
1081 1077 # Adjust copy records: the dirstate cannot do it, as it
1082 1078 # requires access to the parents' manifests. Preserve them
1083 1079 # only for entries added to the first parent.
1084 1080 for f in copies:
1085 1081 if f not in pctx and copies[f] in pctx:
1086 1082 self.dirstate.copy(copies[f], f)
1087 1083 if p2 == nullid:
1088 1084 for f, s in sorted(self.dirstate.copies().items()):
1089 1085 if f not in pctx and s not in pctx:
1090 1086 self.dirstate.copy(None, f)
1091 1087
1092 1088 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1093 1089 """changeid can be a changeset revision, node, or tag.
1094 1090 fileid can be a file revision or node."""
1095 1091 return context.filectx(self, path, changeid, fileid,
1096 1092 changectx=changectx)
1097 1093
1098 1094 def getcwd(self):
1099 1095 return self.dirstate.getcwd()
1100 1096
1101 1097 def pathto(self, f, cwd=None):
1102 1098 return self.dirstate.pathto(f, cwd)
1103 1099
1104 1100 def _loadfilter(self, filter):
1105 1101 if filter not in self._filterpats:
1106 1102 l = []
1107 1103 for pat, cmd in self.ui.configitems(filter):
1108 1104 if cmd == '!':
1109 1105 continue
1110 1106 mf = matchmod.match(self.root, '', [pat])
1111 1107 fn = None
1112 1108 params = cmd
1113 1109 for name, filterfn in self._datafilters.iteritems():
1114 1110 if cmd.startswith(name):
1115 1111 fn = filterfn
1116 1112 params = cmd[len(name):].lstrip()
1117 1113 break
1118 1114 if not fn:
1119 1115 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1120 1116 # Wrap old filters not supporting keyword arguments
1121 1117 if not pycompat.getargspec(fn)[2]:
1122 1118 oldfn = fn
1123 1119 fn = lambda s, c, **kwargs: oldfn(s, c)
1124 1120 l.append((mf, fn, params))
1125 1121 self._filterpats[filter] = l
1126 1122 return self._filterpats[filter]
1127 1123
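# The patterns loaded above come from hgrc sections named after the
# filter ('encode' or 'decode'). A minimal illustrative configuration
# (adapted from "hg help config"; the "tempfile:" prefix selects the
# builtin temporary-file filter driver):
#
#   [encode]
#   **.txt = tempfile: unix2dos -n INFILE OUTFILE
#
#   [decode]
#   **.txt = tempfile: dos2unix -n INFILE OUTFILE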
1128 1124 def _filter(self, filterpats, filename, data):
1129 1125 for mf, fn, cmd in filterpats:
1130 1126 if mf(filename):
1131 1127 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1132 1128 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1133 1129 break
1134 1130
1135 1131 return data
1136 1132
1137 1133 @unfilteredpropertycache
1138 1134 def _encodefilterpats(self):
1139 1135 return self._loadfilter('encode')
1140 1136
1141 1137 @unfilteredpropertycache
1142 1138 def _decodefilterpats(self):
1143 1139 return self._loadfilter('decode')
1144 1140
1145 1141 def adddatafilter(self, name, filter):
1146 1142 self._datafilters[name] = filter
1147 1143
1148 1144 def wread(self, filename):
1149 1145 if self.wvfs.islink(filename):
1150 1146 data = self.wvfs.readlink(filename)
1151 1147 else:
1152 1148 data = self.wvfs.read(filename)
1153 1149 return self._filter(self._encodefilterpats, filename, data)
1154 1150
1155 1151 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1156 1152 """write ``data`` into ``filename`` in the working directory
1157 1153
1158 1154 This returns the length of the written (maybe decoded) data.
1159 1155 """
1160 1156 data = self._filter(self._decodefilterpats, filename, data)
1161 1157 if 'l' in flags:
1162 1158 self.wvfs.symlink(data, filename)
1163 1159 else:
1164 1160 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1165 1161 **kwargs)
1166 1162 if 'x' in flags:
1167 1163 self.wvfs.setflags(filename, False, True)
1168 1164 else:
1169 1165 self.wvfs.setflags(filename, False, False)
1170 1166 return len(data)
1171 1167
1172 1168 def wwritedata(self, filename, data):
1173 1169 return self._filter(self._decodefilterpats, filename, data)
1174 1170
1175 1171 def currenttransaction(self):
1176 1172 """return the current transaction or None if non exists"""
1177 1173 if self._transref:
1178 1174 tr = self._transref()
1179 1175 else:
1180 1176 tr = None
1181 1177
1182 1178 if tr and tr.running():
1183 1179 return tr
1184 1180 return None
1185 1181
1186 1182 def transaction(self, desc, report=None):
1187 1183 if (self.ui.configbool('devel', 'all-warnings')
1188 1184 or self.ui.configbool('devel', 'check-locks')):
1189 1185 if self._currentlock(self._lockref) is None:
1190 1186 raise error.ProgrammingError('transaction requires locking')
1191 1187 tr = self.currenttransaction()
1192 1188 if tr is not None:
1193 1189 return tr.nest(name=desc)
1194 1190
1195 1191 # abort here if the journal already exists
1196 1192 if self.svfs.exists("journal"):
1197 1193 raise error.RepoError(
1198 1194 _("abandoned transaction found"),
1199 1195 hint=_("run 'hg recover' to clean up transaction"))
1200 1196
1201 1197 idbase = "%.40f#%f" % (random.random(), time.time())
1202 1198 ha = hex(hashlib.sha1(idbase).digest())
1203 1199 txnid = 'TXN:' + ha
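# this yields an opaque identifier of the form 'TXN:<40 hex digits>'
# (unique in practice, being a sha1 over random() and the current time)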
1204 1200 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1205 1201
1206 1202 self._writejournal(desc)
1207 1203 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1208 1204 if report:
1209 1205 rp = report
1210 1206 else:
1211 1207 rp = self.ui.warn
1212 1208 vfsmap = {'plain': self.vfs} # root of .hg/
1213 1209 # we must avoid cyclic reference between repo and transaction.
1214 1210 reporef = weakref.ref(self)
1215 1211 # Code to track tag movement
1216 1212 #
1217 1213 # Since tags are all handled as file content, it is actually quite hard
1218 1214 # to track their movement from a code perspective. So we fall back to
1219 1215 # tracking at the repository level. One could envision tracking changes
1220 1216 # to the '.hgtags' file through changegroup apply, but that fails to
1221 1217 # cope with cases where a transaction exposes new heads without a
1222 1218 # changegroup being involved (eg: phase movement).
1223 1219 #
1224 1220 # For now, we gate the feature behind a flag since it likely comes
1225 1221 # with performance impacts. The current code runs more often than needed
1226 1222 # and does not use caches as much as it could. The current focus is on
1227 1223 # the behavior of the feature, so we disable it by default. The flag
1228 1224 # will be removed when we are happy with the performance impact.
1229 1225 #
1230 1226 # Once this feature is no longer experimental move the following
1231 1227 # documentation to the appropriate help section:
1232 1228 #
1233 1229 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1234 1230 # tags (new or changed or deleted tags). In addition the details of
1235 1231 # these changes are made available in a file at:
1236 1232 # ``REPOROOT/.hg/changes/tags.changes``.
1237 1233 # Make sure you check for HG_TAG_MOVED before reading that file as it
1238 1234 # might exist from a previous transaction even if no tags were touched
1239 1235 # in this one. Changes are recorded in a line-based format::
1240 1236 #
1241 1237 # <action> <hex-node> <tag-name>\n
1242 1238 #
1243 1239 # Actions are defined as follows:
1244 1240 # "-R": tag is removed,
1245 1241 # "+A": tag is added,
1246 1242 # "-M": tag is moved (old value),
1247 1243 # "+M": tag is moved (new value),
1248 1244 tracktags = lambda x: None
1249 1245 # experimental config: experimental.hook-track-tags
1250 1246 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1251 1247 if desc != 'strip' and shouldtracktags:
1252 1248 oldheads = self.changelog.headrevs()
1253 1249 def tracktags(tr2):
1254 1250 repo = reporef()
1255 1251 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1256 1252 newheads = repo.changelog.headrevs()
1257 1253 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1258 1254 # note: we compare lists here.
1259 1255 # As we do it only once, building a set would not be cheaper.
1260 1256 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1261 1257 if changes:
1262 1258 tr2.hookargs['tag_moved'] = '1'
1263 1259 with repo.vfs('changes/tags.changes', 'w',
1264 1260 atomictemp=True) as changesfile:
1265 1261 # note: we do not register the file with the transaction
1266 1262 # because we need it to still exist once the transaction
1267 1263 # is closed (for txnclose hooks)
1268 1264 tagsmod.writediff(changesfile, changes)
1269 1265 def validate(tr2):
1270 1266 """will run pre-closing hooks"""
1271 1267 # XXX the transaction API is a bit lacking here so we take a hacky
1272 1268 # path for now
1273 1269 #
1274 1270 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1275 1271 # dict is copied before these run. In addition we need the data
1276 1272 # available to in-memory hooks too.
1277 1273 #
1278 1274 # Moreover, we also need to make sure this runs before txnclose
1279 1275 # hooks and there is no "pending" mechanism that would execute
1280 1276 # logic only if hooks are about to run.
1281 1277 #
1282 1278 # Fixing this limitation of the transaction is also needed to track
1283 1279 # other families of changes (bookmarks, phases, obsolescence).
1284 1280 #
1285 1281 # This will have to be fixed before we remove the experimental
1286 1282 # gating.
1287 1283 tracktags(tr2)
1288 1284 repo = reporef()
1289 1285 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1290 1286 scmutil.enforcesinglehead(repo, tr2, desc)
1291 1287 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1292 1288 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1293 1289 args = tr.hookargs.copy()
1294 1290 args.update(bookmarks.preparehookargs(name, old, new))
1295 1291 repo.hook('pretxnclose-bookmark', throw=True,
1296 1292 txnname=desc,
1297 1293 **pycompat.strkwargs(args))
1298 1294 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1299 1295 cl = repo.unfiltered().changelog
1300 1296 for rev, (old, new) in tr.changes['phases'].items():
1301 1297 args = tr.hookargs.copy()
1302 1298 node = hex(cl.node(rev))
1303 1299 args.update(phases.preparehookargs(node, old, new))
1304 1300 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1305 1301 **pycompat.strkwargs(args))
1306 1302
1307 1303 repo.hook('pretxnclose', throw=True,
1308 1304 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1309 1305 def releasefn(tr, success):
1310 1306 repo = reporef()
1311 1307 if success:
1312 1308 # this should be explicitly invoked here, because
1313 1309 # in-memory changes aren't written out when closing the
1314 1310 # transaction, if tr.addfilegenerator (via
1315 1311 # dirstate.write or so) isn't invoked while the
1316 1312 # transaction is running
1317 1313 repo.dirstate.write(None)
1318 1314 else:
1319 1315 # discard all changes (including ones already written
1320 1316 # out) in this transaction
1321 1317 repo.dirstate.restorebackup(None, 'journal.dirstate')
1322 1318
1323 1319 repo.invalidate(clearfilecache=True)
1324 1320
1325 1321 tr = transaction.transaction(rp, self.svfs, vfsmap,
1326 1322 "journal",
1327 1323 "undo",
1328 1324 aftertrans(renames),
1329 1325 self.store.createmode,
1330 1326 validator=validate,
1331 1327 releasefn=releasefn,
1332 1328 checkambigfiles=_cachedfiles,
1333 1329 name=desc)
1334 1330 tr.changes['revs'] = xrange(0, 0)
1335 1331 tr.changes['obsmarkers'] = set()
1336 1332 tr.changes['phases'] = {}
1337 1333 tr.changes['bookmarks'] = {}
1338 1334
1339 1335 tr.hookargs['txnid'] = txnid
1340 1336 # note: writing the fncache only during finalize means that the file is
1341 1337 # outdated when running hooks. As fncache is used for streaming clone,
1342 1338 # this is not expected to break anything that happens during the hooks.
1343 1339 tr.addfinalize('flush-fncache', self.store.write)
1344 1340 def txnclosehook(tr2):
1345 1341 """To be run if transaction is successful, will schedule a hook run
1346 1342 """
1347 1343 # Don't reference tr2 in hook() so we don't hold a reference.
1348 1344 # This reduces memory consumption when there are multiple
1349 1345 # transactions per lock. This can likely go away if issue5045
1350 1346 # fixes the function accumulation.
1351 1347 hookargs = tr2.hookargs
1352 1348
1353 1349 def hookfunc():
1354 1350 repo = reporef()
1355 1351 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1356 1352 bmchanges = sorted(tr.changes['bookmarks'].items())
1357 1353 for name, (old, new) in bmchanges:
1358 1354 args = tr.hookargs.copy()
1359 1355 args.update(bookmarks.preparehookargs(name, old, new))
1360 1356 repo.hook('txnclose-bookmark', throw=False,
1361 1357 txnname=desc, **pycompat.strkwargs(args))
1362 1358
1363 1359 if hook.hashook(repo.ui, 'txnclose-phase'):
1364 1360 cl = repo.unfiltered().changelog
1365 1361 phasemv = sorted(tr.changes['phases'].items())
1366 1362 for rev, (old, new) in phasemv:
1367 1363 args = tr.hookargs.copy()
1368 1364 node = hex(cl.node(rev))
1369 1365 args.update(phases.preparehookargs(node, old, new))
1370 1366 repo.hook('txnclose-phase', throw=False, txnname=desc,
1371 1367 **pycompat.strkwargs(args))
1372 1368
1373 1369 repo.hook('txnclose', throw=False, txnname=desc,
1374 1370 **pycompat.strkwargs(hookargs))
1375 1371 reporef()._afterlock(hookfunc)
1376 1372 tr.addfinalize('txnclose-hook', txnclosehook)
1377 1373 # Include a leading "-" to make it happen before the transaction summary
1378 1374 # reports registered via scmutil.registersummarycallback() whose names
1379 1375 # are 00-txnreport etc. That way, the caches will be warm when the
1380 1376 # callbacks run.
1381 1377 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1382 1378 def txnaborthook(tr2):
1383 1379 """To be run if transaction is aborted
1384 1380 """
1385 1381 reporef().hook('txnabort', throw=False, txnname=desc,
1386 1382 **pycompat.strkwargs(tr2.hookargs))
1387 1383 tr.addabort('txnabort-hook', txnaborthook)
1388 1384 # avoid eager cache invalidation. in-memory data should be identical
1389 1385 # to stored data if transaction has no error.
1390 1386 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1391 1387 self._transref = weakref.ref(tr)
1392 1388 scmutil.registersummarycallback(self, tr, desc)
1393 1389 return tr
1394 1390
1395 1391 def _journalfiles(self):
1396 1392 return ((self.svfs, 'journal'),
1397 1393 (self.vfs, 'journal.dirstate'),
1398 1394 (self.vfs, 'journal.branch'),
1399 1395 (self.vfs, 'journal.desc'),
1400 1396 (self.vfs, 'journal.bookmarks'),
1401 1397 (self.svfs, 'journal.phaseroots'))
1402 1398
1403 1399 def undofiles(self):
1404 1400 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1405 1401
1406 1402 @unfilteredmethod
1407 1403 def _writejournal(self, desc):
1408 1404 self.dirstate.savebackup(None, 'journal.dirstate')
1409 1405 self.vfs.write("journal.branch",
1410 1406 encoding.fromlocal(self.dirstate.branch()))
1411 1407 self.vfs.write("journal.desc",
1412 1408 "%d\n%s\n" % (len(self), desc))
1413 1409 self.vfs.write("journal.bookmarks",
1414 1410 self.vfs.tryread("bookmarks"))
1415 1411 self.svfs.write("journal.phaseroots",
1416 1412 self.svfs.tryread("phaseroots"))
1417 1413
1418 1414 def recover(self):
1419 1415 with self.lock():
1420 1416 if self.svfs.exists("journal"):
1421 1417 self.ui.status(_("rolling back interrupted transaction\n"))
1422 1418 vfsmap = {'': self.svfs,
1423 1419 'plain': self.vfs,}
1424 1420 transaction.rollback(self.svfs, vfsmap, "journal",
1425 1421 self.ui.warn,
1426 1422 checkambigfiles=_cachedfiles)
1427 1423 self.invalidate()
1428 1424 return True
1429 1425 else:
1430 1426 self.ui.warn(_("no interrupted transaction available\n"))
1431 1427 return False
1432 1428
1433 1429 def rollback(self, dryrun=False, force=False):
1434 1430 wlock = lock = dsguard = None
1435 1431 try:
1436 1432 wlock = self.wlock()
1437 1433 lock = self.lock()
1438 1434 if self.svfs.exists("undo"):
1439 1435 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1440 1436
1441 1437 return self._rollback(dryrun, force, dsguard)
1442 1438 else:
1443 1439 self.ui.warn(_("no rollback information available\n"))
1444 1440 return 1
1445 1441 finally:
1446 1442 release(dsguard, lock, wlock)
1447 1443
1448 1444 @unfilteredmethod # Until we get smarter cache management
1449 1445 def _rollback(self, dryrun, force, dsguard):
1450 1446 ui = self.ui
1451 1447 try:
1452 1448 args = self.vfs.read('undo.desc').splitlines()
1453 1449 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1454 1450 if len(args) >= 3:
1455 1451 detail = args[2]
1456 1452 oldtip = oldlen - 1
1457 1453
1458 1454 if detail and ui.verbose:
1459 1455 msg = (_('repository tip rolled back to revision %d'
1460 1456 ' (undo %s: %s)\n')
1461 1457 % (oldtip, desc, detail))
1462 1458 else:
1463 1459 msg = (_('repository tip rolled back to revision %d'
1464 1460 ' (undo %s)\n')
1465 1461 % (oldtip, desc))
1466 1462 except IOError:
1467 1463 msg = _('rolling back unknown transaction\n')
1468 1464 desc = None
1469 1465
1470 1466 if not force and self['.'] != self['tip'] and desc == 'commit':
1471 1467 raise error.Abort(
1472 1468 _('rollback of last commit while not checked out '
1473 1469 'may lose data'), hint=_('use -f to force'))
1474 1470
1475 1471 ui.status(msg)
1476 1472 if dryrun:
1477 1473 return 0
1478 1474
1479 1475 parents = self.dirstate.parents()
1480 1476 self.destroying()
1481 1477 vfsmap = {'plain': self.vfs, '': self.svfs}
1482 1478 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1483 1479 checkambigfiles=_cachedfiles)
1484 1480 if self.vfs.exists('undo.bookmarks'):
1485 1481 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1486 1482 if self.svfs.exists('undo.phaseroots'):
1487 1483 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1488 1484 self.invalidate()
1489 1485
1490 1486 parentgone = (parents[0] not in self.changelog.nodemap or
1491 1487 parents[1] not in self.changelog.nodemap)
1492 1488 if parentgone:
1493 1489 # prevent dirstateguard from overwriting the already restored one
1494 1490 dsguard.close()
1495 1491
1496 1492 self.dirstate.restorebackup(None, 'undo.dirstate')
1497 1493 try:
1498 1494 branch = self.vfs.read('undo.branch')
1499 1495 self.dirstate.setbranch(encoding.tolocal(branch))
1500 1496 except IOError:
1501 1497 ui.warn(_('named branch could not be reset: '
1502 1498 'current branch is still \'%s\'\n')
1503 1499 % self.dirstate.branch())
1504 1500
1505 1501 parents = tuple([p.rev() for p in self[None].parents()])
1506 1502 if len(parents) > 1:
1507 1503 ui.status(_('working directory now based on '
1508 1504 'revisions %d and %d\n') % parents)
1509 1505 else:
1510 1506 ui.status(_('working directory now based on '
1511 1507 'revision %d\n') % parents)
1512 1508 mergemod.mergestate.clean(self, self['.'].node())
1513 1509
1514 1510 # TODO: if we know which new heads may result from this rollback, pass
1515 1511 # them to destroy(), which will prevent the branchhead cache from being
1516 1512 # invalidated.
1517 1513 self.destroyed()
1518 1514 return 0
1519 1515
1520 1516 def _buildcacheupdater(self, newtransaction):
1521 1517 """called during transaction to build the callback updating cache
1522 1518
1523 1519 Lives on the repository to help extensions that might want to augment
1524 1520 this logic. For this purpose, the created transaction is passed to the
1525 1521 method.
1526 1522 """
1527 1523 # we must avoid cyclic reference between repo and transaction.
1528 1524 reporef = weakref.ref(self)
1529 1525 def updater(tr):
1530 1526 repo = reporef()
1531 1527 repo.updatecaches(tr)
1532 1528 return updater
1533 1529
1534 1530 @unfilteredmethod
1535 1531 def updatecaches(self, tr=None, full=False):
1536 1532 """warm appropriate caches
1537 1533
1538 1534 If this function is called after a transaction close, the transaction
1539 1535 will be available in the 'tr' argument. This can be used to selectively
1540 1536 update caches relevant to the changes in that transaction.
1541 1537
1542 1538 If 'full' is set, make sure all caches the function knows about have
1543 1539 up-to-date data, even the ones usually loaded more lazily.
1544 1540 """
1545 1541 if tr is not None and tr.hookargs.get('source') == 'strip':
1546 1542 # During strip, many caches are invalid but
1547 1543 # later call to `destroyed` will refresh them.
1548 1544 return
1549 1545
1550 1546 if tr is None or tr.changes['revs']:
1551 1547 # updating the unfiltered branchmap should refresh all the others,
1552 1548 self.ui.debug('updating the branch cache\n')
1553 1549 branchmap.updatecache(self.filtered('served'))
1554 1550
1555 1551 if full:
1556 1552 rbc = self.revbranchcache()
1557 1553 for r in self.changelog:
1558 1554 rbc.branchinfo(r)
1559 1555 rbc.write()
1560 1556
1561 1557 def invalidatecaches(self):
1562 1558
1563 1559 if '_tagscache' in vars(self):
1564 1560 # can't use delattr on proxy
1565 1561 del self.__dict__['_tagscache']
1566 1562
1567 1563 self.unfiltered()._branchcaches.clear()
1568 1564 self.invalidatevolatilesets()
1569 1565 self._sparsesignaturecache.clear()
1570 1566
1571 1567 def invalidatevolatilesets(self):
1572 1568 self.filteredrevcache.clear()
1573 1569 obsolete.clearobscaches(self)
1574 1570
1575 1571 def invalidatedirstate(self):
1576 1572 '''Invalidates the dirstate, causing the next call to dirstate
1577 1573 to check if it was modified since the last time it was read,
1578 1574 rereading it if it has.
1579 1575
1580 1576 This is different from dirstate.invalidate() in that it doesn't always
1581 1577 reread the dirstate. Use dirstate.invalidate() if you want to
1582 1578 explicitly read the dirstate again (i.e. restoring it to a previous
1583 1579 known good state).'''
1584 1580 if hasunfilteredcache(self, 'dirstate'):
1585 1581 for k in self.dirstate._filecache:
1586 1582 try:
1587 1583 delattr(self.dirstate, k)
1588 1584 except AttributeError:
1589 1585 pass
1590 1586 delattr(self.unfiltered(), 'dirstate')
1591 1587
1592 1588 def invalidate(self, clearfilecache=False):
1593 1589 '''Invalidates both store and non-store parts other than dirstate
1594 1590
1595 1591 If a transaction is running, invalidation of store is omitted,
1596 1592 because discarding in-memory changes might cause inconsistency
1597 1593 (e.g. incomplete fncache causes unintentional failure, but
1598 1594 redundant one doesn't).
1599 1595 '''
1600 1596 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1601 1597 for k in list(self._filecache.keys()):
1602 1598 # dirstate is invalidated separately in invalidatedirstate()
1603 1599 if k == 'dirstate':
1604 1600 continue
1605 1601 if (k == 'changelog' and
1606 1602 self.currenttransaction() and
1607 1603 self.changelog._delayed):
1608 1604 # The changelog object may store unwritten revisions. We don't
1609 1605 # want to lose them.
1610 1606 # TODO: Solve the problem instead of working around it.
1611 1607 continue
1612 1608
1613 1609 if clearfilecache:
1614 1610 del self._filecache[k]
1615 1611 try:
1616 1612 delattr(unfiltered, k)
1617 1613 except AttributeError:
1618 1614 pass
1619 1615 self.invalidatecaches()
1620 1616 if not self.currenttransaction():
1621 1617 # TODO: Changing contents of store outside transaction
1622 1618 # causes inconsistency. We should make in-memory store
1623 1619 # changes detectable, and abort if changed.
1624 1620 self.store.invalidatecaches()
1625 1621
1626 1622 def invalidateall(self):
1627 1623 '''Fully invalidates both store and non-store parts, causing the
1628 1624 subsequent operation to reread any outside changes.'''
1629 1625 # extension should hook this to invalidate its caches
1630 1626 self.invalidate()
1631 1627 self.invalidatedirstate()
1632 1628
1633 1629 @unfilteredmethod
1634 1630 def _refreshfilecachestats(self, tr):
1635 1631 """Reload stats of cached files so that they are flagged as valid"""
1636 1632 for k, ce in self._filecache.items():
1637 1633 k = pycompat.sysstr(k)
1638 1634 if k == r'dirstate' or k not in self.__dict__:
1639 1635 continue
1640 1636 ce.refresh()
1641 1637
1642 1638 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1643 1639 inheritchecker=None, parentenvvar=None):
1644 1640 parentlock = None
1645 1641 # the contents of parentenvvar are used by the underlying lock to
1646 1642 # determine whether it can be inherited
1647 1643 if parentenvvar is not None:
1648 1644 parentlock = encoding.environ.get(parentenvvar)
1649 1645
1650 1646 timeout = 0
1651 1647 warntimeout = 0
1652 1648 if wait:
1653 1649 timeout = self.ui.configint("ui", "timeout")
1654 1650 warntimeout = self.ui.configint("ui", "timeout.warn")
1655 1651
1656 1652 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1657 1653 releasefn=releasefn,
1658 1654 acquirefn=acquirefn, desc=desc,
1659 1655 inheritchecker=inheritchecker,
1660 1656 parentlock=parentlock)
1661 1657 return l
1662 1658
1663 1659 def _afterlock(self, callback):
1664 1660 """add a callback to be run when the repository is fully unlocked
1665 1661
1666 1662 The callback will be executed when the outermost lock is released
1667 1663 (with wlock being higher level than 'lock')."""
1668 1664 for ref in (self._wlockref, self._lockref):
1669 1665 l = ref and ref()
1670 1666 if l and l.held:
1671 1667 l.postrelease.append(callback)
1672 1668 break
1673 1669 else: # no lock has been found.
1674 1670 callback()
1675 1671
1676 1672 def lock(self, wait=True):
1677 1673 '''Lock the repository store (.hg/store) and return a weak reference
1678 1674 to the lock. Use this before modifying the store (e.g. committing or
1679 1675 stripping). If you are opening a transaction, get a lock as well.
1680 1676
1681 1677 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1682 1678 'wlock' first to avoid a dead-lock hazard.'''
1683 1679 l = self._currentlock(self._lockref)
1684 1680 if l is not None:
1685 1681 l.lock()
1686 1682 return l
1687 1683
1688 1684 l = self._lock(self.svfs, "lock", wait, None,
1689 1685 self.invalidate, _('repository %s') % self.origroot)
1690 1686 self._lockref = weakref.ref(l)
1691 1687 return l
1692 1688
1693 1689 def _wlockchecktransaction(self):
1694 1690 if self.currenttransaction() is not None:
1695 1691 raise error.LockInheritanceContractViolation(
1696 1692 'wlock cannot be inherited in the middle of a transaction')
1697 1693
1698 1694 def wlock(self, wait=True):
1699 1695 '''Lock the non-store parts of the repository (everything under
1700 1696 .hg except .hg/store) and return a weak reference to the lock.
1701 1697
1702 1698 Use this before modifying files in .hg.
1703 1699
1704 1700 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1705 1701 'wlock' first to avoid a dead-lock hazard.'''
1706 1702 l = self._wlockref and self._wlockref()
1707 1703 if l is not None and l.held:
1708 1704 l.lock()
1709 1705 return l
1710 1706
1711 1707 # We do not need to check for non-waiting lock acquisition. Such
1712 1708 # acquisition would not cause a dead-lock, as it would just fail.
1713 1709 if wait and (self.ui.configbool('devel', 'all-warnings')
1714 1710 or self.ui.configbool('devel', 'check-locks')):
1715 1711 if self._currentlock(self._lockref) is not None:
1716 1712 self.ui.develwarn('"wlock" acquired after "lock"')
1717 1713
1718 1714 def unlock():
1719 1715 if self.dirstate.pendingparentchange():
1720 1716 self.dirstate.invalidate()
1721 1717 else:
1722 1718 self.dirstate.write(None)
1723 1719
1724 1720 self._filecache['dirstate'].refresh()
1725 1721
1726 1722 l = self._lock(self.vfs, "wlock", wait, unlock,
1727 1723 self.invalidatedirstate, _('working directory of %s') %
1728 1724 self.origroot,
1729 1725 inheritchecker=self._wlockchecktransaction,
1730 1726 parentenvvar='HG_WLOCK_LOCKER')
1731 1727 self._wlockref = weakref.ref(l)
1732 1728 return l
1733 1729
1734 1730 def _currentlock(self, lockref):
1735 1731 """Returns the lock if it's held, or None if it's not."""
1736 1732 if lockref is None:
1737 1733 return None
1738 1734 l = lockref()
1739 1735 if l is None or not l.held:
1740 1736 return None
1741 1737 return l
1742 1738
1743 1739 def currentwlock(self):
1744 1740 """Returns the wlock if it's held, or None if it's not."""
1745 1741 return self._currentlock(self._wlockref)
1746 1742
1747 1743 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1748 1744 """
1749 1745 commit an individual file as part of a larger transaction
1750 1746 """
1751 1747
1752 1748 fname = fctx.path()
1753 1749 fparent1 = manifest1.get(fname, nullid)
1754 1750 fparent2 = manifest2.get(fname, nullid)
1755 1751 if isinstance(fctx, context.filectx):
1756 1752 node = fctx.filenode()
1757 1753 if node in [fparent1, fparent2]:
1758 1754 self.ui.debug('reusing %s filelog entry\n' % fname)
1759 1755 if manifest1.flags(fname) != fctx.flags():
1760 1756 changelist.append(fname)
1761 1757 return node
1762 1758
1763 1759 flog = self.file(fname)
1764 1760 meta = {}
1765 1761 copy = fctx.renamed()
1766 1762 if copy and copy[0] != fname:
1767 1763 # Mark the new revision of this file as a copy of another
1768 1764 # file. This copy data will effectively act as a parent
1769 1765 # of this new revision. If this is a merge, the first
1770 1766 # parent will be the nullid (meaning "look up the copy data")
1771 1767 # and the second one will be the other parent. For example:
1772 1768 #
1773 1769 # 0 --- 1 --- 3 rev1 changes file foo
1774 1770 # \ / rev2 renames foo to bar and changes it
1775 1771 # \- 2 -/ rev3 should have bar with all changes and
1776 1772 # should record that bar descends from
1777 1773 # bar in rev2 and foo in rev1
1778 1774 #
1779 1775 # this allows this merge to succeed:
1780 1776 #
1781 1777 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1782 1778 # \ / merging rev3 and rev4 should use bar@rev2
1783 1779 # \- 2 --- 4 as the merge base
1784 1780 #
1785 1781
1786 1782 cfname = copy[0]
1787 1783 crev = manifest1.get(cfname)
1788 1784 newfparent = fparent2
1789 1785
1790 1786 if manifest2: # branch merge
1791 1787 if fparent2 == nullid or crev is None: # copied on remote side
1792 1788 if cfname in manifest2:
1793 1789 crev = manifest2[cfname]
1794 1790 newfparent = fparent1
1795 1791
1796 1792 # Here, we used to search backwards through history to try to find
1797 1793 # where the file copy came from if the source of a copy was not in
1798 1794 # the parent directory. However, this doesn't actually make sense to
1799 1795 # do (what does a copy from something not in your working copy even
1800 1796 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1801 1797 # the user that copy information was dropped, so if they didn't
1802 1798 # expect this outcome it can be fixed, but this is the correct
1803 1799 # behavior in this circumstance.
1804 1800
1805 1801 if crev:
1806 1802 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1807 1803 meta["copy"] = cfname
1808 1804 meta["copyrev"] = hex(crev)
1809 1805 fparent1, fparent2 = nullid, newfparent
1810 1806 else:
1811 1807 self.ui.warn(_("warning: can't find ancestor for '%s' "
1812 1808 "copied from '%s'!\n") % (fname, cfname))
1813 1809
1814 1810 elif fparent1 == nullid:
1815 1811 fparent1, fparent2 = fparent2, nullid
1816 1812 elif fparent2 != nullid:
1817 1813 # is one parent an ancestor of the other?
1818 1814 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1819 1815 if fparent1 in fparentancestors:
1820 1816 fparent1, fparent2 = fparent2, nullid
1821 1817 elif fparent2 in fparentancestors:
1822 1818 fparent2 = nullid
1823 1819
1824 1820 # is the file changed?
1825 1821 text = fctx.data()
1826 1822 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1827 1823 changelist.append(fname)
1828 1824 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1829 1825 # are just the flags changed during merge?
1830 1826 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1831 1827 changelist.append(fname)
1832 1828
1833 1829 return fparent1
1834 1830
1835 1831 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1836 1832 """check for commit arguments that aren't committable"""
1837 1833 if match.isexact() or match.prefix():
1838 1834 matched = set(status.modified + status.added + status.removed)
1839 1835
1840 1836 for f in match.files():
1841 1837 f = self.dirstate.normalize(f)
1842 1838 if f == '.' or f in matched or f in wctx.substate:
1843 1839 continue
1844 1840 if f in status.deleted:
1845 1841 fail(f, _('file not found!'))
1846 1842 if f in vdirs: # visited directory
1847 1843 d = f + '/'
1848 1844 for mf in matched:
1849 1845 if mf.startswith(d):
1850 1846 break
1851 1847 else:
1852 1848 fail(f, _("no match under directory!"))
1853 1849 elif f not in self.dirstate:
1854 1850 fail(f, _("file not tracked!"))
1855 1851
1856 1852 @unfilteredmethod
1857 1853 def commit(self, text="", user=None, date=None, match=None, force=False,
1858 1854 editor=False, extra=None):
1859 1855 """Add a new revision to current repository.
1860 1856
1861 1857 Revision information is gathered from the working directory,
1862 1858 match can be used to filter the committed files. If editor is
1863 1859 supplied, it is called to get a commit message.
1864 1860 """
1865 1861 if extra is None:
1866 1862 extra = {}
1867 1863
1868 1864 def fail(f, msg):
1869 1865 raise error.Abort('%s: %s' % (f, msg))
1870 1866
1871 1867 if not match:
1872 1868 match = matchmod.always(self.root, '')
1873 1869
1874 1870 if not force:
1875 1871 vdirs = []
1876 1872 match.explicitdir = vdirs.append
1877 1873 match.bad = fail
1878 1874
1879 1875 wlock = lock = tr = None
1880 1876 try:
1881 1877 wlock = self.wlock()
1882 1878 lock = self.lock() # for recent changelog (see issue4368)
1883 1879
1884 1880 wctx = self[None]
1885 1881 merge = len(wctx.parents()) > 1
1886 1882
1887 1883 if not force and merge and not match.always():
1888 1884 raise error.Abort(_('cannot partially commit a merge '
1889 1885 '(do not specify files or patterns)'))
1890 1886
1891 1887 status = self.status(match=match, clean=force)
1892 1888 if force:
1893 1889 status.modified.extend(status.clean) # mq may commit clean files
1894 1890
1895 1891 # check subrepos
1896 1892 subs, commitsubs, newstate = subrepoutil.precommit(
1897 1893 self.ui, wctx, status, match, force=force)
1898 1894
1899 1895 # make sure all explicit patterns are matched
1900 1896 if not force:
1901 1897 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1902 1898
1903 1899 cctx = context.workingcommitctx(self, status,
1904 1900 text, user, date, extra)
1905 1901
1906 1902 # internal config: ui.allowemptycommit
1907 1903 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1908 1904 or extra.get('close') or merge or cctx.files()
1909 1905 or self.ui.configbool('ui', 'allowemptycommit'))
1910 1906 if not allowemptycommit:
1911 1907 return None
1912 1908
1913 1909 if merge and cctx.deleted():
1914 1910 raise error.Abort(_("cannot commit merge with missing files"))
1915 1911
1916 1912 ms = mergemod.mergestate.read(self)
1917 1913 mergeutil.checkunresolved(ms)
1918 1914
1919 1915 if editor:
1920 1916 cctx._text = editor(self, cctx, subs)
1921 1917 edited = (text != cctx._text)
1922 1918
1923 1919 # Save commit message in case this transaction gets rolled back
1924 1920 # (e.g. by a pretxncommit hook). Leave the content alone on
1925 1921 # the assumption that the user will use the same editor again.
1926 1922 msgfn = self.savecommitmessage(cctx._text)
1927 1923
1928 1924 # commit subs and write new state
1929 1925 if subs:
1930 1926 for s in sorted(commitsubs):
1931 1927 sub = wctx.sub(s)
1932 1928 self.ui.status(_('committing subrepository %s\n') %
1933 1929 subrepoutil.subrelpath(sub))
1934 1930 sr = sub.commit(cctx._text, user, date)
1935 1931 newstate[s] = (newstate[s][0], sr)
1936 1932 subrepoutil.writestate(self, newstate)
1937 1933
1938 1934 p1, p2 = self.dirstate.parents()
1939 1935 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1940 1936 try:
1941 1937 self.hook("precommit", throw=True, parent1=hookp1,
1942 1938 parent2=hookp2)
1943 1939 tr = self.transaction('commit')
1944 1940 ret = self.commitctx(cctx, True)
1945 1941 except: # re-raises
1946 1942 if edited:
1947 1943 self.ui.write(
1948 1944 _('note: commit message saved in %s\n') % msgfn)
1949 1945 raise
1950 1946 # update bookmarks, dirstate and mergestate
1951 1947 bookmarks.update(self, [p1, p2], ret)
1952 1948 cctx.markcommitted(ret)
1953 1949 ms.reset()
1954 1950 tr.close()
1955 1951
1956 1952 finally:
1957 1953 lockmod.release(tr, lock, wlock)
1958 1954
1959 1955 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1960 1956 # hack for commands that use a temporary commit (eg: histedit):
1961 1957 # the temporary commit may have been stripped before the hook runs
1962 1958 if self.changelog.hasnode(ret):
1963 1959 self.hook("commit", node=node, parent1=parent1,
1964 1960 parent2=parent2)
1965 1961 self._afterlock(commithook)
1966 1962 return ret
1967 1963
1968 1964 @unfilteredmethod
1969 1965 def commitctx(self, ctx, error=False):
1970 1966 """Add a new revision to current repository.
1971 1967 Revision information is passed via the context argument.
1972 1968 """
1973 1969
1974 1970 tr = None
1975 1971 p1, p2 = ctx.p1(), ctx.p2()
1976 1972 user = ctx.user()
1977 1973
1978 1974 lock = self.lock()
1979 1975 try:
1980 1976 tr = self.transaction("commit")
1981 1977 trp = weakref.proxy(tr)
1982 1978
1983 1979 if ctx.manifestnode():
1984 1980 # reuse an existing manifest revision
1985 1981 mn = ctx.manifestnode()
1986 1982 files = ctx.files()
1987 1983 elif ctx.files():
1988 1984 m1ctx = p1.manifestctx()
1989 1985 m2ctx = p2.manifestctx()
1990 1986 mctx = m1ctx.copy()
1991 1987
1992 1988 m = mctx.read()
1993 1989 m1 = m1ctx.read()
1994 1990 m2 = m2ctx.read()
1995 1991
1996 1992 # check in files
1997 1993 added = []
1998 1994 changed = []
1999 1995 removed = list(ctx.removed())
2000 1996 linkrev = len(self)
2001 1997 self.ui.note(_("committing files:\n"))
2002 1998 for f in sorted(ctx.modified() + ctx.added()):
2003 1999 self.ui.note(f + "\n")
2004 2000 try:
2005 2001 fctx = ctx[f]
2006 2002 if fctx is None:
2007 2003 removed.append(f)
2008 2004 else:
2009 2005 added.append(f)
2010 2006 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2011 2007 trp, changed)
2012 2008 m.setflag(f, fctx.flags())
2013 2009 except OSError as inst:
2014 2010 self.ui.warn(_("trouble committing %s!\n") % f)
2015 2011 raise
2016 2012 except IOError as inst:
2017 2013 errcode = getattr(inst, 'errno', errno.ENOENT)
2018 2014 if error or errcode and errcode != errno.ENOENT:
2019 2015 self.ui.warn(_("trouble committing %s!\n") % f)
2020 2016 raise
2021 2017
2022 2018 # update manifest
2023 2019 self.ui.note(_("committing manifest\n"))
2024 2020 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2025 2021 drop = [f for f in removed if f in m]
2026 2022 for f in drop:
2027 2023 del m[f]
2028 2024 mn = mctx.write(trp, linkrev,
2029 2025 p1.manifestnode(), p2.manifestnode(),
2030 2026 added, drop)
2031 2027 files = changed + removed
2032 2028 else:
2033 2029 mn = p1.manifestnode()
2034 2030 files = []
2035 2031
2036 2032 # update changelog
2037 2033 self.ui.note(_("committing changelog\n"))
2038 2034 self.changelog.delayupdate(tr)
2039 2035 n = self.changelog.add(mn, files, ctx.description(),
2040 2036 trp, p1.node(), p2.node(),
2041 2037 user, ctx.date(), ctx.extra().copy())
2042 2038 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2043 2039 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2044 2040 parent2=xp2)
2045 2041 # set the new commit in its proper phase
2046 2042 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2047 2043 if targetphase:
2048 2044 # retracting the boundary does not alter parent changesets.
2049 2045 # if a parent has a higher phase, the resulting phase will
2050 2046 # be compliant anyway
2051 2047 #
2052 2048 # if minimal phase was 0 we don't need to retract anything
2053 2049 phases.registernew(self, tr, targetphase, [n])
2054 2050 tr.close()
2055 2051 return n
2056 2052 finally:
2057 2053 if tr:
2058 2054 tr.release()
2059 2055 lock.release()
2060 2056
2061 2057 @unfilteredmethod
2062 2058 def destroying(self):
2063 2059 '''Inform the repository that nodes are about to be destroyed.
2064 2060 Intended for use by strip and rollback, so there's a common
2065 2061 place for anything that has to be done before destroying history.
2066 2062
2067 2063 This is mostly useful for saving state that is in memory and waiting
2068 2064 to be flushed when the current lock is released. Because a call to
2069 2065 destroyed is imminent, the repo will be invalidated causing those
2070 2066 changes to stay in memory (waiting for the next unlock), or vanish
2071 2067 completely.
2072 2068 '''
2073 2069 # When using the same lock to commit and strip, the phasecache is left
2074 2070 # dirty after committing. Then when we strip, the repo is invalidated,
2075 2071 # causing those changes to disappear.
2076 2072 if '_phasecache' in vars(self):
2077 2073 self._phasecache.write()
2078 2074
2079 2075 @unfilteredmethod
2080 2076 def destroyed(self):
2081 2077 '''Inform the repository that nodes have been destroyed.
2082 2078 Intended for use by strip and rollback, so there's a common
2083 2079 place for anything that has to be done after destroying history.
2084 2080 '''
2085 2081 # When one tries to:
2086 2082 # 1) destroy nodes thus calling this method (e.g. strip)
2087 2083 # 2) use phasecache somewhere (e.g. commit)
2088 2084 #
2089 2085 # then 2) will fail because the phasecache contains nodes that were
2090 2086 # removed. We can either remove phasecache from the filecache,
2091 2087 # causing it to reload next time it is accessed, or simply filter
2092 2088 # the removed nodes now and write the updated cache.
2093 2089 self._phasecache.filterunknown(self)
2094 2090 self._phasecache.write()
2095 2091
2096 2092 # refresh all repository caches
2097 2093 self.updatecaches()
2098 2094
2099 2095 # Ensure the persistent tag cache is updated. Doing it now
2100 2096 # means that the tag cache only has to worry about destroyed
2101 2097 # heads immediately after a strip/rollback. That in turn
2102 2098 # guarantees that "cachetip == currenttip" (comparing both rev
2103 2099 # and node) always means no nodes have been added or destroyed.
2104 2100
2105 2101 # XXX this is suboptimal when qrefresh'ing: we strip the current
2106 2102 # head, refresh the tag cache, then immediately add a new head.
2107 2103 # But I think doing it this way is necessary for the "instant
2108 2104 # tag cache retrieval" case to work.
2109 2105 self.invalidate()
2110 2106
2111 2107 def status(self, node1='.', node2=None, match=None,
2112 2108 ignored=False, clean=False, unknown=False,
2113 2109 listsubrepos=False):
2114 2110 '''a convenience method that calls node1.status(node2)'''
2115 2111 return self[node1].status(node2, match, ignored, clean, unknown,
2116 2112 listsubrepos)
2117 2113
2118 2114 def addpostdsstatus(self, ps):
2119 2115 """Add a callback to run within the wlock, at the point at which status
2120 2116 fixups happen.
2121 2117
2122 2118 On status completion, callback(wctx, status) will be called with the
2123 2119 wlock held, unless the dirstate has changed from underneath or the wlock
2124 2120 couldn't be grabbed.
2125 2121
2126 2122 Callbacks should not capture and use a cached copy of the dirstate --
2127 2123 it might change in the meantime. Instead, they should access the
2128 2124 dirstate via wctx.repo().dirstate.
2129 2125
2130 2126 This list is emptied out after each status run -- extensions should
2131 2127 make sure they add to this list each time dirstate.status is called.
2132 2128 Extensions should also make sure they don't call this for statuses
2133 2129 that don't involve the dirstate.
2134 2130 """
2135 2131
2136 2132 # The list is located here for uniqueness reasons -- it is actually
2137 2133 # managed by the workingctx, but that isn't unique per-repo.
2138 2134 self._postdsstatus.append(ps)
2139 2135
2140 2136 def postdsstatus(self):
2141 2137 """Used by workingctx to get the list of post-dirstate-status hooks."""
2142 2138 return self._postdsstatus
2143 2139
2144 2140 def clearpostdsstatus(self):
2145 2141 """Used by workingctx to clear post-dirstate-status hooks."""
2146 2142 del self._postdsstatus[:]
2147 2143
2148 2144 def heads(self, start=None):
2149 2145 if start is None:
2150 2146 cl = self.changelog
2151 2147 headrevs = reversed(cl.headrevs())
2152 2148 return [cl.node(rev) for rev in headrevs]
2153 2149
2154 2150 heads = self.changelog.heads(start)
2155 2151 # sort the output in rev descending order
2156 2152 return sorted(heads, key=self.changelog.rev, reverse=True)
2157 2153
2158 2154 def branchheads(self, branch=None, start=None, closed=False):
2159 2155 '''return a (possibly filtered) list of heads for the given branch
2160 2156
2161 2157 Heads are returned in topological order, from newest to oldest.
2162 2158 If branch is None, use the dirstate branch.
2163 2159 If start is not None, return only heads reachable from start.
2164 2160 If closed is True, return heads that are marked as closed as well.
2165 2161 '''
2166 2162 if branch is None:
2167 2163 branch = self[None].branch()
2168 2164 branches = self.branchmap()
2169 2165 if branch not in branches:
2170 2166 return []
2171 2167 # the cache returns heads ordered lowest to highest
2172 2168 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2173 2169 if start is not None:
2174 2170 # filter out the heads that cannot be reached from startrev
2175 2171 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2176 2172 bheads = [h for h in bheads if h in fbheads]
2177 2173 return bheads
2178 2174
2179 2175 def branches(self, nodes):
2180 2176 if not nodes:
2181 2177 nodes = [self.changelog.tip()]
2182 2178 b = []
2183 2179 for n in nodes:
2184 2180 t = n
2185 2181 while True:
2186 2182 p = self.changelog.parents(n)
2187 2183 if p[1] != nullid or p[0] == nullid:
2188 2184 b.append((t, n, p[0], p[1]))
2189 2185 break
2190 2186 n = p[0]
2191 2187 return b
2192 2188
2193 2189 def between(self, pairs):
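# for each (top, bottom) pair, walk first parents from "top" towards
# "bottom", collecting the nodes whose distance from "top" is a power
# of two (1, 2, 4, ...); this implements the sampling used by the
# legacy "between" wire protocol command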
2194 2190 r = []
2195 2191
2196 2192 for top, bottom in pairs:
2197 2193 n, l, i = top, [], 0
2198 2194 f = 1
2199 2195
2200 2196 while n != bottom and n != nullid:
2201 2197 p = self.changelog.parents(n)[0]
2202 2198 if i == f:
2203 2199 l.append(n)
2204 2200 f = f * 2
2205 2201 n = p
2206 2202 i += 1
2207 2203
2208 2204 r.append(l)
2209 2205
2210 2206 return r
2211 2207
2212 2208 def checkpush(self, pushop):
2213 2209 """Extensions can override this function if additional checks have
2214 2210 to be performed before pushing, or call it if they override the push
2215 2211 command.
2216 2212 """
2217 2213
2218 2214 @unfilteredpropertycache
2219 2215 def prepushoutgoinghooks(self):
2220 2216 """Return util.hooks consists of a pushop with repo, remote, outgoing
2221 2217 methods, which are called before pushing changesets.
2222 2218 """
2223 2219 return util.hooks()
2224 2220
2225 2221 def pushkey(self, namespace, key, old, new):
2226 2222 try:
2227 2223 tr = self.currenttransaction()
2228 2224 hookargs = {}
2229 2225 if tr is not None:
2230 2226 hookargs.update(tr.hookargs)
2231 2227 hookargs = pycompat.strkwargs(hookargs)
2232 2228 hookargs[r'namespace'] = namespace
2233 2229 hookargs[r'key'] = key
2234 2230 hookargs[r'old'] = old
2235 2231 hookargs[r'new'] = new
2236 2232 self.hook('prepushkey', throw=True, **hookargs)
2237 2233 except error.HookAbort as exc:
2238 2234 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2239 2235 if exc.hint:
2240 2236 self.ui.write_err(_("(%s)\n") % exc.hint)
2241 2237 return False
2242 2238 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2243 2239 ret = pushkey.push(self, namespace, key, old, new)
2244 2240 def runhook():
2245 2241 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2246 2242 ret=ret)
2247 2243 self._afterlock(runhook)
2248 2244 return ret
2249 2245
2250 2246 def listkeys(self, namespace):
2251 2247 self.hook('prelistkeys', throw=True, namespace=namespace)
2252 2248 self.ui.debug('listing keys for "%s"\n' % namespace)
2253 2249 values = pushkey.list(self, namespace)
2254 2250 self.hook('listkeys', namespace=namespace, values=values)
2255 2251 return values
2256 2252
2257 2253 def debugwireargs(self, one, two, three=None, four=None, five=None):
2258 2254 '''used to test argument passing over the wire'''
2259 2255 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2260 2256 pycompat.bytestr(four),
2261 2257 pycompat.bytestr(five))
2262 2258
2263 2259 def savecommitmessage(self, text):
2264 2260 fp = self.vfs('last-message.txt', 'wb')
2265 2261 try:
2266 2262 fp.write(text)
2267 2263 finally:
2268 2264 fp.close()
2269 2265 return self.pathto(fp.name[len(self.root) + 1:])
2270 2266
2271 2267 # used to avoid circular references so destructors work
2272 2268 def aftertrans(files):
2273 2269 renamefiles = [tuple(t) for t in files]
2274 2270 def a():
2275 2271 for vfs, src, dest in renamefiles:
2276 2272 # if src and dest refer to the same file, vfs.rename is a no-op,
2277 2273 # leaving both src and dest on disk. delete dest to make sure
2278 2274 # the rename cannot be such a no-op.
2279 2275 vfs.tryunlink(dest)
2280 2276 try:
2281 2277 vfs.rename(src, dest)
2282 2278 except OSError: # journal file does not yet exist
2283 2279 pass
2284 2280 return a
2285 2281
2286 2282 def undoname(fn):
2287 2283 base, name = os.path.split(fn)
2288 2284 assert name.startswith('journal')
2289 2285 return os.path.join(base, name.replace('journal', 'undo', 1))
2290 2286
2291 2287 def instance(ui, path, create):
2292 2288 return localrepository(ui, util.urllocalpath(path), create)
2293 2289
2294 2290 def islocal(path):
2295 2291 return True
2296 2292
2297 2293 def newreporequirements(repo):
2298 2294 """Determine the set of requirements for a new local repository.
2299 2295
2300 2296 Extensions can wrap this function to specify custom requirements for
2301 2297 new repositories.
2302 2298 """
2303 2299 ui = repo.ui
2304 2300 requirements = {'revlogv1'}
2305 2301 if ui.configbool('format', 'usestore'):
2306 2302 requirements.add('store')
2307 2303 if ui.configbool('format', 'usefncache'):
2308 2304 requirements.add('fncache')
2309 2305 if ui.configbool('format', 'dotencode'):
2310 2306 requirements.add('dotencode')
2311 2307
2312 2308 compengine = ui.config('experimental', 'format.compression')
2313 2309 if compengine not in util.compengines:
2314 2310 raise error.Abort(_('compression engine %s defined by '
2315 2311 'experimental.format.compression not available') %
2316 2312 compengine,
2317 2313 hint=_('run "hg debuginstall" to list available '
2318 2314 'compression engines'))
2319 2315
2320 2316 # zlib is the historical default and doesn't need an explicit requirement.
2321 2317 if compengine != 'zlib':
2322 2318 requirements.add('exp-compression-%s' % compengine)
2323 2319
2324 2320 if scmutil.gdinitconfig(ui):
2325 2321 requirements.add('generaldelta')
2326 2322 if ui.configbool('experimental', 'treemanifest'):
2327 2323 requirements.add('treemanifest')
2328 2324
2329 2325 revlogv2 = ui.config('experimental', 'revlogv2')
2330 2326 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2331 2327 requirements.remove('revlogv1')
2332 2328 # generaldelta is implied by revlogv2.
2333 2329 requirements.discard('generaldelta')
2334 2330 requirements.add(REVLOGV2_REQUIREMENT)
2335 2331
2336 2332 return requirements
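
# A sketch of how an extension might wrap the function above to add a
# custom requirement to newly created repositories (extension layout and
# requirement string hypothetical):
#
#   from mercurial import extensions, localrepo
#
#   def _wrapreqs(orig, repo):
#       requirements = orig(repo)
#       requirements.add('exp-myfeature')
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _wrapreqs)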
@@ -1,616 +1,612 b''
1 1 # sshpeer.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11 import uuid
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 error,
16 16 pycompat,
17 17 util,
18 18 wireproto,
19 19 wireprotoserver,
20 20 wireprototypes,
21 21 )
22 22 from .utils import (
23 23 procutil,
24 24 )
25 25
26 26 def _serverquote(s):
27 27 """quote a string for the remote shell ... which we assume is sh"""
28 28 if not s:
29 29 return s
30 30 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
31 31 return s
32 32 return "'%s'" % s.replace("'", "'\\''")
33 33
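# e.g. _serverquote("it's") returns the sh-quoted string 'it'\''s',
# while input matching the safe character class above (such as
# "path/to/repo") passes through unquoted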
34 34 def _forwardoutput(ui, pipe):
35 35 """display all data currently available on pipe as remote output.
36 36
37 37 This is non-blocking."""
38 38 if pipe:
39 39 s = procutil.readpipe(pipe)
40 40 if s:
41 41 for l in s.splitlines():
42 42 ui.status(_("remote: "), l, '\n')
43 43
44 44 class doublepipe(object):
45 45 """Operate a side-channel pipe in addition of a main one
46 46
47 47 The side-channel pipe contains server output to be forwarded to the user
48 48 input. The double pipe will behave as the "main" pipe, but will ensure the
49 49 content of the "side" pipe is properly processed while we wait for blocking
50 50 call on the "main" pipe.
51 51
52 52 If large amounts of data are read from "main", the forward will cease after
53 53 the first bytes start to appear. This simplifies the implementation
54 54 without affecting actual output of sshpeer too much as we rarely issue
55 55 large read for data not yet emitted by the server.
56 56
57 57 The main pipe is expected to be a 'bufferedinputpipe' from the util module
58 58 that handle all the os specific bits. This class lives in this module
59 59 because it focus on behavior specific to the ssh protocol."""
60 60
61 61 def __init__(self, ui, main, side):
62 62 self._ui = ui
63 63 self._main = main
64 64 self._side = side
65 65
66 66 def _wait(self):
67 67 """wait until some data are available on main or side
68 68
69 69 return a pair of booleans (ismainready, issideready)
70 70
71 71 (This will only wait for data if the setup is supported by `util.poll`)
72 72 """
73 73 if (isinstance(self._main, util.bufferedinputpipe) and
74 74 self._main.hasbuffer):
75 75 # Main has data. Assume side is worth poking at.
76 76 return True, True
77 77
78 78 fds = [self._main.fileno(), self._side.fileno()]
79 79 try:
80 80 act = util.poll(fds)
81 81 except NotImplementedError:
82 82 # not yet supported case; assume all have data.
83 83 act = fds
84 84 return (self._main.fileno() in act, self._side.fileno() in act)
85 85
86 86 def write(self, data):
87 87 return self._call('write', data)
88 88
89 89 def read(self, size):
90 90 r = self._call('read', size)
91 91 if size != 0 and not r:
92 92 # We've observed a condition that indicates the
93 93 # stdout closed unexpectedly. Check stderr one
94 94 # more time and snag anything that's there before
95 95 # letting anyone know the main part of the pipe
96 96 # closed prematurely.
97 97 _forwardoutput(self._ui, self._side)
98 98 return r
99 99
100 100 def readline(self):
101 101 return self._call('readline')
102 102
103 103 def _call(self, methname, data=None):
104 104 """call <methname> on "main", forward output of "side" while blocking
105 105 """
106 106 # data can be '' or 0
107 107 if (data is not None and not data) or self._main.closed:
108 108 _forwardoutput(self._ui, self._side)
109 109 return ''
110 110 while True:
111 111 mainready, sideready = self._wait()
112 112 if sideready:
113 113 _forwardoutput(self._ui, self._side)
114 114 if mainready:
115 115 meth = getattr(self._main, methname)
116 116 if data is None:
117 117 return meth()
118 118 else:
119 119 return meth(data)
120 120
121 121 def close(self):
122 122 return self._main.close()
123 123
124 124 def flush(self):
125 125 return self._main.flush()
126 126
127 127 def _cleanuppipes(ui, pipei, pipeo, pipee):
128 128 """Clean up pipes used by an SSH connection."""
129 129 if pipeo:
130 130 pipeo.close()
131 131 if pipei:
132 132 pipei.close()
133 133
134 134 if pipee:
135 135 # Try to read from the err descriptor until EOF.
136 136 try:
137 137 for l in pipee:
138 138 ui.status(_('remote: '), l)
139 139 except (IOError, ValueError):
140 140 pass
141 141
142 142 pipee.close()
143 143
144 144 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
145 145 """Create an SSH connection to a server.
146 146
147 147 Returns a tuple of (process, stdin, stdout, stderr) for the
148 148 spawned process.
149 149 """
150 150 cmd = '%s %s %s' % (
151 151 sshcmd,
152 152 args,
153 153 procutil.shellquote('%s -R %s serve --stdio' % (
154 154 _serverquote(remotecmd), _serverquote(path))))
155 155
156 156 ui.debug('running %s\n' % cmd)
157 157 cmd = procutil.quotecommand(cmd)
158 158
159 159 # no buffering allows the use of 'select'
160 160 # feel free to remove buffering and select usage when we ultimately
161 161 # move to threading.
162 162 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
163 163
164 164 return proc, stdin, stdout, stderr
165 165
166 166 def _performhandshake(ui, stdin, stdout, stderr):
167 167 def badresponse():
168 168 # Flush any output on stderr.
169 169 _forwardoutput(ui, stderr)
170 170
171 171 msg = _('no suitable response from remote hg')
172 172 hint = ui.config('ui', 'ssherrorhint')
173 173 raise error.RepoError(msg, hint=hint)
174 174
175 175 # The handshake consists of sending wire protocol commands in reverse
176 176 # order of protocol implementation and then sniffing for a response
177 177 # to one of them.
178 178 #
179 179 # Those commands (from oldest to newest) are:
180 180 #
181 181 # ``between``
182 182 # Asks for the set of revisions between a pair of revisions. Command
183 183 # present in all Mercurial server implementations.
184 184 #
185 185 # ``hello``
186 186 # Instructs the server to advertise its capabilities. Introduced in
187 187 # Mercurial 0.9.1.
188 188 #
189 189 # ``upgrade``
190 190 # Requests upgrade from default transport protocol version 1 to
191 191 # a newer version. Introduced in Mercurial 4.6 as an experimental
192 192 # feature.
193 193 #
194 194 # The ``between`` command is issued with a request for the null
195 195 # range. If the remote is a Mercurial server, this request will
196 196 # generate a specific response: ``1\n\n``. This represents the
197 197 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
198 198 # in the output stream and know this is the response to ``between``
199 199 # and we're at the end of our handshake reply.
200 200 #
201 201 # The response to the ``hello`` command will be a line with the
202 202 # length of the value returned by that command followed by that
203 203 # value. If the server doesn't support ``hello`` (which should be
204 204 # rare), that line will be ``0\n``. Otherwise, the value will contain
205 205 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
206 206 # the capabilities of the server.
207 207 #
208 208 # The ``upgrade`` command isn't really a command in the traditional
209 209 # sense of version 1 of the transport because it isn't using the
210 210 # proper mechanism for formatting requests: instead, it just encodes
211 211 # arguments on the line, delimited by spaces.
212 212 #
213 213 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
214 214 # If the server doesn't support protocol upgrades, it will reply to
215 215 # this line with ``0\n``. Otherwise, it emits an
216 216 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
217 217 # Content immediately following this line describes additional
218 218 # protocol and server state.
219 219 #
220 220 # In addition to the responses to our command requests, the server
221 221 # may emit "banner" output on stdout. SSH servers are allowed to
222 222 # print messages to stdout on login. Issuing commands on connection
223 223 # allows us to flush this banner output from the server by scanning
224 224 # for output to our well-known ``between`` command. Of course, if
225 225 # the banner contains ``1\n\n``, this will throw off our detection.
226 226
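Putting that together, the raw bytes the client writes look roughly like this (the upgrade line is present only when experimental.sshpeer.advertise-v2 is set; the token and capability string are abbreviated):

# upgrade <token> proto=<wireprotoserver.SSHV2>\n
# hello\n
# between\n
# pairs 81\n
# <40-byte null node>-<40-byte null node>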
227 227 requestlog = ui.configbool('devel', 'debug.peer-request')
228 228
229 229 # Generate a random token to help identify responses to version 2
230 230 # upgrade request.
231 231 token = pycompat.sysbytes(str(uuid.uuid4()))
232 232 upgradecaps = [
233 233 ('proto', wireprotoserver.SSHV2),
234 234 ]
235 235 upgradecaps = util.urlreq.urlencode(upgradecaps)
236 236
237 237 try:
238 238 pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
239 239 handshake = [
240 240 'hello\n',
241 241 'between\n',
242 242 'pairs %d\n' % len(pairsarg),
243 243 pairsarg,
244 244 ]
245 245
246 246 # Request upgrade to version 2 if configured.
247 247 if ui.configbool('experimental', 'sshpeer.advertise-v2'):
248 248 ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
249 249 handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
250 250
251 251 if requestlog:
252 252 ui.debug('devel-peer-request: hello\n')
253 253 ui.debug('sending hello command\n')
254 254 if requestlog:
255 255 ui.debug('devel-peer-request: between\n')
256 256 ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
257 257 ui.debug('sending between command\n')
258 258
259 259 stdin.write(''.join(handshake))
260 260 stdin.flush()
261 261 except IOError:
262 262 badresponse()
263 263
264 264 # Assume version 1 of wire protocol by default.
265 265 protoname = wireprototypes.SSHV1
266 266 reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
267 267
268 268 lines = ['', 'dummy']
269 269 max_noise = 500
270 270 while lines[-1] and max_noise:
271 271 try:
272 272 l = stdout.readline()
273 273 _forwardoutput(ui, stderr)
274 274
275 275 # Look for reply to protocol upgrade request. It has a token
276 276 # in it, so there should be no false positives.
277 277 m = reupgraded.match(l)
278 278 if m:
279 279 protoname = m.group(1)
280 280 ui.debug('protocol upgraded to %s\n' % protoname)
281 281 # If an upgrade was handled, the ``hello`` and ``between``
282 282 # requests are ignored. The next output belongs to the
283 283 # protocol, so stop scanning lines.
284 284 break
285 285
286 286 # Otherwise it could be a banner, or a ``0\n`` response if the
287 287 # server doesn't support the upgrade.
288 288
289 289 if lines[-1] == '1\n' and l == '\n':
290 290 break
291 291 if l:
292 292 ui.debug('remote: ', l)
293 293 lines.append(l)
294 294 max_noise -= 1
295 295 except IOError:
296 296 badresponse()
297 297 else:
298 298 badresponse()
299 299
300 300 caps = set()
301 301
302 302 # For version 1, we should see a ``capabilities`` line in response to the
303 303 # ``hello`` command.
304 304 if protoname == wireprototypes.SSHV1:
305 305 for l in reversed(lines):
306 306 # Look for response to ``hello`` command. Scan from the back so
307 307 # we don't misinterpret banner output as the command reply.
308 308 if l.startswith('capabilities:'):
309 309 caps.update(l[:-1].split(':')[1].split())
310 310 break
311 311 elif protoname == wireprotoserver.SSHV2:
312 312 # We see a line with the number of bytes to follow and then a value
313 313 # looking like ``capabilities: *``.
314 314 line = stdout.readline()
315 315 try:
316 316 valuelen = int(line)
317 317 except ValueError:
318 318 badresponse()
319 319
320 320 capsline = stdout.read(valuelen)
321 321 if not capsline.startswith('capabilities: '):
322 322 badresponse()
323 323
324 324 ui.debug('remote: %s\n' % capsline)
325 325
326 326 caps.update(capsline.split(':')[1].split())
327 327 # Trailing newline.
328 328 stdout.read(1)
329 329
330 330 # Error if we couldn't find capabilities; this means:
331 331 #
332 332 # 1. Remote isn't a Mercurial server
333 333 # 2. Remote is a <0.9.1 Mercurial server
334 334 # 3. Remote is a future Mercurial server that dropped ``hello``
335 335 # and other attempted handshake mechanisms.
336 336 if not caps:
337 337 badresponse()
338 338
339 339 # Flush any output on stderr before proceeding.
340 340 _forwardoutput(ui, stderr)
341 341
342 342 return protoname, caps
343 343
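For reference, a successful version 1 handshake reply (after any SSH banner noise, and with no upgrade requested) interleaves the responses in command order; the length and capability list below are illustrative:

# 384\n                  <- length of the hello payload
# capabilities: lookup branchmap known getbundle unbundle ...\n
# 1\n                    <- between reply: payload length...
# \n                     <- ...and the encoded null-range value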
344 344 class sshv1peer(wireproto.wirepeer):
345 345 def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
346 346 autoreadstderr=True):
347 347 """Create a peer from an existing SSH connection.
348 348
349 349 ``proc`` is a handle on the underlying SSH process.
350 350 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
351 351 pipes for that process.
352 352 ``caps`` is a set of capabilities supported by the remote.
353 353 ``autoreadstderr`` denotes whether to automatically read from
354 354 stderr and to forward its output.
355 355 """
356 356 self._url = url
357 self._ui = ui
357 self.ui = ui
358 358 # self._subprocess is unused. Keeping a handle on the process
359 359 # holds a reference and prevents it from being garbage collected.
360 360 self._subprocess = proc
361 361
362 362 # And we hook up our "doublepipe" wrapper to allow querying
363 363 # stderr any time we perform I/O.
364 364 if autoreadstderr:
365 365 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
366 366 stdin = doublepipe(ui, stdin, stderr)
367 367
368 368 self._pipeo = stdin
369 369 self._pipei = stdout
370 370 self._pipee = stderr
371 371 self._caps = caps
372 372 self._autoreadstderr = autoreadstderr
373 373
374 374 # Commands that have a "framed" response where the first line of the
375 375 # response contains the length of that response.
376 376 _FRAMED_COMMANDS = {
377 377 'batch',
378 378 }
379 379
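A "framed" response simply means the payload is prefixed with its decimal byte length and a newline. For a hypothetical batch reply carrying 12 bytes the stream holds:

# 12\n
# <12 bytes of payload>

_getamount() below parses the length line, and util.cappedreader then keeps callers from reading past the payload.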
380 380 # Begin of ipeerconnection interface.
381 381
382 @util.propertycache
383 def ui(self):
384 return self._ui
385
386 382 def url(self):
387 383 return self._url
388 384
389 385 def local(self):
390 386 return None
391 387
392 388 def peer(self):
393 389 return self
394 390
395 391 def canpush(self):
396 392 return True
397 393
398 394 def close(self):
399 395 pass
400 396
401 397 # End of ipeerconnection interface.
402 398
403 399 # Begin of ipeercommands interface.
404 400
405 401 def capabilities(self):
406 402 return self._caps
407 403
408 404 # End of ipeercommands interface.
409 405
410 406 def _readerr(self):
411 407 _forwardoutput(self.ui, self._pipee)
412 408
413 409 def _abort(self, exception):
414 410 self._cleanup()
415 411 raise exception
416 412
417 413 def _cleanup(self):
418 414 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
419 415
420 416 __del__ = _cleanup
421 417
422 418 def _sendrequest(self, cmd, args, framed=False):
423 419 if (self.ui.debugflag
424 420 and self.ui.configbool('devel', 'debug.peer-request')):
425 421 dbg = self.ui.debug
426 422 line = 'devel-peer-request: %s\n'
427 423 dbg(line % cmd)
428 424 for key, value in sorted(args.items()):
429 425 if not isinstance(value, dict):
430 426 dbg(line % ' %s: %d bytes' % (key, len(value)))
431 427 else:
432 428 for dk, dv in sorted(value.items()):
433 429 dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
434 430 self.ui.debug("sending %s command\n" % cmd)
435 431 self._pipeo.write("%s\n" % cmd)
436 432 _func, names = wireproto.commands[cmd]
437 433 keys = names.split()
438 434 wireargs = {}
439 435 for k in keys:
440 436 if k == '*':
441 437 wireargs['*'] = args
442 438 break
443 439 else:
444 440 wireargs[k] = args[k]
445 441 del args[k]
446 442 for k, v in sorted(wireargs.iteritems()):
447 443 self._pipeo.write("%s %d\n" % (k, len(v)))
448 444 if isinstance(v, dict):
449 445 for dk, dv in v.iteritems():
450 446 self._pipeo.write("%s %d\n" % (dk, len(dv)))
451 447 self._pipeo.write(dv)
452 448 else:
453 449 self._pipeo.write(v)
454 450 self._pipeo.flush()
455 451
456 452 # We know exactly how many bytes are in the response. So return a proxy
457 453 # around the raw output stream that allows reading exactly this many
458 454 # bytes. Callers then can read() without fear of overrunning the
459 455 # response.
460 456 if framed:
461 457 amount = self._getamount()
462 458 return util.cappedreader(self._pipei, amount)
463 459
464 460 return self._pipei
465 461
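To make the argument encoding concrete: a hypothetical ``lookup`` call with a single ``key`` argument of ``tip`` goes over the pipe as the command name followed by one ``<name> <length>`` header per argument and then the raw value bytes:

# lookup\n
# key 3\n
# tip

For dictionary-valued arguments the header's number is the item count, and each item is then sent as its own ``<name> <length>`` header plus value.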
466 462 def _callstream(self, cmd, **args):
467 463 args = pycompat.byteskwargs(args)
468 464 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
469 465
470 466 def _callcompressable(self, cmd, **args):
471 467 args = pycompat.byteskwargs(args)
472 468 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
473 469
474 470 def _call(self, cmd, **args):
475 471 args = pycompat.byteskwargs(args)
476 472 return self._sendrequest(cmd, args, framed=True).read()
477 473
478 474 def _callpush(self, cmd, fp, **args):
479 475 # The server responds with an empty frame if the client should
480 476 # continue submitting the payload.
481 477 r = self._call(cmd, **args)
482 478 if r:
483 479 return '', r
484 480
485 481 # The payload consists of frames with content followed by an empty
486 482 # frame.
487 483 for d in iter(lambda: fp.read(4096), ''):
488 484 self._writeframed(d)
489 485 self._writeframed("", flush=True)
490 486
491 487 # In case of success, there is an empty frame and a frame containing
492 488 # the integer result (as a string).
493 489 # In case of error, there is a non-empty frame containing the error.
494 490 r = self._readframed()
495 491 if r:
496 492 return '', r
497 493 return self._readframed(), ''
498 494
499 495 def _calltwowaystream(self, cmd, fp, **args):
500 496 # The server responds with an empty frame if the client should
501 497 # continue submitting the payload.
502 498 r = self._call(cmd, **args)
503 499 if r:
504 500 # XXX needs to be made better
505 501 raise error.Abort(_('unexpected remote reply: %s') % r)
506 502
507 503 # The payload consists of frames with content followed by an empty
508 504 # frame.
509 505 for d in iter(lambda: fp.read(4096), ''):
510 506 self._writeframed(d)
511 507 self._writeframed("", flush=True)
512 508
513 509 return self._pipei
514 510
515 511 def _getamount(self):
516 512 l = self._pipei.readline()
517 513 if l == '\n':
518 514 if self._autoreadstderr:
519 515 self._readerr()
520 516 msg = _('check previous remote output')
521 517 self._abort(error.OutOfBandError(hint=msg))
522 518 if self._autoreadstderr:
523 519 self._readerr()
524 520 try:
525 521 return int(l)
526 522 except ValueError:
527 523 self._abort(error.ResponseError(_("unexpected response:"), l))
528 524
529 525 def _readframed(self):
530 526 size = self._getamount()
531 527 if not size:
532 528 return b''
533 529
534 530 return self._pipei.read(size)
535 531
536 532 def _writeframed(self, data, flush=False):
537 533 self._pipeo.write("%d\n" % len(data))
538 534 if data:
539 535 self._pipeo.write(data)
540 536 if flush:
541 537 self._pipeo.flush()
542 538 if self._autoreadstderr:
543 539 self._readerr()
544 540
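The write-side framing is symmetric: each frame is its byte length, a newline, then the data, and a zero-length frame (``0\n``) terminates a payload. Pushing, say, 5000 bytes read in 4096-byte chunks therefore emits (illustrative):

# 4096\n<4096 bytes>904\n<904 bytes>0\n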
545 541 class sshv2peer(sshv1peer):
546 542 """A peer that speakers version 2 of the transport protocol."""
547 543 # Currently version 2 is identical to version 1 post handshake.
548 544 # And handshake is performed before the peer is instantiated. So
549 545 # we need no custom code.
550 546
551 547 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
552 548 """Make a peer instance from existing pipes.
553 549
554 550 ``path`` and ``proc`` are stored on the eventual peer instance and
555 551 might not be used for anything meaningful.
556 552
557 553 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
558 554 SSH server's stdio handles.
559 555
560 556 This function is factored out to allow creating peers that don't
561 557 actually spawn a new process. It is useful for starting SSH protocol
562 558 servers and clients via non-standard means, which can be useful for
563 559 testing.
564 560 """
565 561 try:
566 562 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
567 563 except Exception:
568 564 _cleanuppipes(ui, stdout, stdin, stderr)
569 565 raise
570 566
571 567 if protoname == wireprototypes.SSHV1:
572 568 return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
573 569 autoreadstderr=autoreadstderr)
574 570 elif protoname == wireprototypes.SSHV2:
575 571 return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
576 572 autoreadstderr=autoreadstderr)
577 573 else:
578 574 _cleanuppipes(ui, stdout, stdin, stderr)
579 575 raise error.RepoError(_('unknown version of SSH protocol: %s') %
580 576 protoname)
581 577
582 578 def instance(ui, path, create):
583 579 """Create an SSH peer.
584 580
585 581 The returned object conforms to the ``wireproto.wirepeer`` interface.
586 582 """
587 583 u = util.url(path, parsequery=False, parsefragment=False)
588 584 if u.scheme != 'ssh' or not u.host or u.path is None:
589 585 raise error.RepoError(_("couldn't parse location %s") % path)
590 586
591 587 util.checksafessh(path)
592 588
593 589 if u.passwd is not None:
594 590 raise error.RepoError(_('password in URL not supported'))
595 591
596 592 sshcmd = ui.config('ui', 'ssh')
597 593 remotecmd = ui.config('ui', 'remotecmd')
598 594 sshaddenv = dict(ui.configitems('sshenv'))
599 595 sshenv = procutil.shellenviron(sshaddenv)
600 596 remotepath = u.path or '.'
601 597
602 598 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
603 599
604 600 if create:
605 601 cmd = '%s %s %s' % (sshcmd, args,
606 602 procutil.shellquote('%s init %s' %
607 603 (_serverquote(remotecmd), _serverquote(remotepath))))
608 604 ui.debug('running %s\n' % cmd)
609 605 res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
610 606 if res != 0:
611 607 raise error.RepoError(_('could not create remote repo'))
612 608
613 609 proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
614 610 remotepath, sshenv)
615 611
616 612 return makepeer(ui, path, proc, stdin, stdout, stderr)
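A minimal sketch of exercising this entry point directly (the URL is made up, and normal callers resolve the scheme through mercurial.hg.peer rather than importing this module):

from mercurial import sshpeer, ui as uimod

myui = uimod.ui.load()
# create=False connects to an existing remote repository over SSH.
peer = sshpeer.instance(myui, b'ssh://hg@example.com/repo', False)
print(peer.capabilities())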
@@ -1,102 +1,98 b''
1 1 from __future__ import absolute_import, print_function
2 2
3 3 from mercurial import (
4 4 error,
5 5 pycompat,
6 6 ui as uimod,
7 7 util,
8 8 wireproto,
9 9 wireprototypes,
10 10 )
11 11 stringio = util.stringio
12 12
13 13 class proto(object):
14 14 def __init__(self, args):
15 15 self.args = args
16 16 self.name = 'dummyproto'
17 17
18 18 def getargs(self, spec):
19 19 args = self.args
20 20 args.setdefault(b'*', {})
21 21 names = spec.split()
22 22 return [args[n] for n in names]
23 23
24 24 def checkperm(self, perm):
25 25 pass
26 26
27 27 wireprototypes.TRANSPORTS['dummyproto'] = {
28 28 'transport': 'dummy',
29 29 'version': 1,
30 30 }
31 31
32 32 class clientpeer(wireproto.wirepeer):
33 33 def __init__(self, serverrepo, ui):
34 34 self.serverrepo = serverrepo
35 self._ui = ui
36
37 @property
38 def ui(self):
39 return self._ui
35 self.ui = ui
40 36
41 37 def url(self):
42 38 return b'test'
43 39
44 40 def local(self):
45 41 return None
46 42
47 43 def peer(self):
48 44 return self
49 45
50 46 def canpush(self):
51 47 return True
52 48
53 49 def close(self):
54 50 pass
55 51
56 52 def capabilities(self):
57 53 return [b'batch']
58 54
59 55 def _call(self, cmd, **args):
60 56 args = pycompat.byteskwargs(args)
61 57 res = wireproto.dispatch(self.serverrepo, proto(args), cmd)
62 58 if isinstance(res, wireprototypes.bytesresponse):
63 59 return res.data
64 60 elif isinstance(res, bytes):
65 61 return res
66 62 else:
67 63 raise error.Abort('dummy client does not support response type')
68 64
69 65 def _callstream(self, cmd, **args):
70 66 return stringio(self._call(cmd, **args))
71 67
72 68 @wireproto.batchable
73 69 def greet(self, name):
74 70 f = wireproto.future()
75 71 yield {b'name': mangle(name)}, f
76 72 yield unmangle(f.value)
77 73
78 74 class serverrepo(object):
79 75 def greet(self, name):
80 76 return b"Hello, " + name
81 77
82 78 def filtered(self, name):
83 79 return self
84 80
85 81 def mangle(s):
86 82 return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
87 83 def unmangle(s):
88 84 return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
89 85
90 86 def greet(repo, proto, name):
91 87 return mangle(repo.greet(unmangle(name)))
92 88
93 89 wireproto.commands[b'greet'] = (greet, b'name',)
94 90
95 91 srv = serverrepo()
96 92 clt = clientpeer(srv, uimod.ui())
97 93
98 94 print(clt.greet(b"Foobar"))
99 95 b = clt.iterbatch()
100 96 list(map(b.greet, (b'Fo, =;:<o', b'Bar')))
101 97 b.submit()
102 98 print([r for r in b.results()])