hg: pass command intents to repo/peer creation (API)...
Gregory Szorc
r37735:0664be4f default
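This changeset threads a new optional ``intents`` argument from command
dispatch down into repo/peer construction, so that creation code can
eventually specialize based on how the repository will be used (for
example, read-only access). A minimal sketch of the pattern, using
stand-in names rather than Mercurial's real modules::

    INTENT_READONLY = 'readonly'  # stand-in for the real intent constant

    class fakerepo(object):
        def __init__(self, ui, path, readonly=False):
            self.ui, self.path, self.readonly = ui, path, readonly

    def instance(ui, path, create, intents=None):
        # before this change the signature ended at ``create``; every
        # repo/peer factory now accepts ``intents`` and forwards it
        if create:
            raise ValueError('cannot create this repo type')
        readonly = bool(intents) and INTENT_READONLY in intents
        return fakerepo(ui, path, readonly=readonly)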
@@ -1,134 +1,134 @@ hgext/schemes.py
1 1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """extend schemes with shortcuts to repository swarms
7 7
8 8 This extension allows you to specify shortcuts for parent URLs that
9 9 host many repositories, so they act like a scheme, for example::
10 10
11 11 [schemes]
12 12 py = http://code.python.org/hg/
13 13
14 14 After that you can use it like::
15 15
16 16 hg clone py://trunk/
17 17
18 18 Additionally there is support for some more complex schemes, for
19 19 example the one used by Google Code::
20 20
21 21 [schemes]
22 22 gcode = http://{1}.googlecode.com/hg/
23 23
24 24 The syntax is taken from Mercurial templates, and you have an
25 25 unlimited number of variables, starting with ``{1}`` and continuing
26 26 with ``{2}``, ``{3}`` and so on. These variables will receive parts
27 27 of the URL supplied, split by ``/``. Anything not specified as
28 28 ``{part}`` is simply appended to the URL.
29 29
30 30 For convenience, the extension adds these schemes by default::
31 31
32 32 [schemes]
33 33 py = http://hg.python.org/
34 34 bb = https://bitbucket.org/
35 35 bb+ssh = ssh://hg@bitbucket.org/
36 36 gcode = https://{1}.googlecode.com/hg/
37 37 kiln = https://{1}.kilnhg.com/Repo/
38 38
39 39 You can override a predefined scheme by defining a new scheme with the
40 40 same name.
41 41 """
42 42 from __future__ import absolute_import
43 43
44 44 import os
45 45 import re
46 46
47 47 from mercurial.i18n import _
48 48 from mercurial import (
49 49 error,
50 50 extensions,
51 51 hg,
52 52 pycompat,
53 53 registrar,
54 54 templater,
55 55 util,
56 56 )
57 57
58 58 cmdtable = {}
59 59 command = registrar.command(cmdtable)
60 60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 62 # be specifying the version(s) of Mercurial they are tested with, or
63 63 # leave the attribute unspecified.
64 64 testedwith = 'ships-with-hg-core'
65 65
66 66 _partre = re.compile(br'\{(\d+)\}')
67 67
68 68 class ShortRepository(object):
69 69 def __init__(self, url, scheme, templater):
70 70 self.scheme = scheme
71 71 self.templater = templater
72 72 self.url = url
73 73 try:
74 74 self.parts = max(map(int, _partre.findall(self.url)))
75 75 except ValueError:
76 76 self.parts = 0
77 77
78 78 def __repr__(self):
79 79 return '<ShortRepository: %s>' % self.scheme
80 80
81 def instance(self, ui, url, create):
81 def instance(self, ui, url, create, intents=None):
82 82 url = self.resolve(url)
83 return hg._peerlookup(url).instance(ui, url, create)
83 return hg._peerlookup(url).instance(ui, url, create, intents=intents)
84 84
85 85 def resolve(self, url):
86 86 # Should this use the util.url class, or is manual parsing better?
87 87 try:
88 88 url = url.split('://', 1)[1]
89 89 except IndexError:
90 90 raise error.Abort(_("no '://' in scheme url '%s'") % url)
91 91 parts = url.split('/', self.parts)
92 92 if len(parts) > self.parts:
93 93 tail = parts[-1]
94 94 parts = parts[:-1]
95 95 else:
96 96 tail = ''
97 97 context = dict(('%d' % (i + 1), v) for i, v in enumerate(parts))
98 98 return ''.join(self.templater.process(self.url, context)) + tail
99 99
100 100 def hasdriveletter(orig, path):
101 101 if path:
102 102 for scheme in schemes:
103 103 if path.startswith(scheme + ':'):
104 104 return False
105 105 return orig(path)
106 106
107 107 schemes = {
108 108 'py': 'http://hg.python.org/',
109 109 'bb': 'https://bitbucket.org/',
110 110 'bb+ssh': 'ssh://hg@bitbucket.org/',
111 111 'gcode': 'https://{1}.googlecode.com/hg/',
112 112 'kiln': 'https://{1}.kilnhg.com/Repo/'
113 113 }
114 114
115 115 def extsetup(ui):
116 116 schemes.update(dict(ui.configitems('schemes')))
117 117 t = templater.engine(lambda x: x)
118 118 for scheme, url in schemes.items():
119 119 if (pycompat.iswindows and len(scheme) == 1 and scheme.isalpha()
120 120 and os.path.exists('%s:\\' % scheme)):
121 121 raise error.Abort(_('custom scheme %s:// conflicts with drive '
122 122 'letter %s:\\\n') % (scheme, scheme.upper()))
123 123 hg.schemes[scheme] = ShortRepository(url, scheme, t)
124 124
125 125 extensions.wrapfunction(util, 'hasdriveletter', hasdriveletter)
126 126
127 127 @command('debugexpandscheme', norepo=True)
128 128 def expandscheme(ui, url, **opts):
129 129 """given a repo path, provide the scheme-expanded path
130 130 """
131 131 repo = hg._peerlookup(url)
132 132 if isinstance(repo, ShortRepository):
133 133 url = repo.resolve(url)
134 134 ui.write(url + '\n')
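To make the scheme expansion above concrete, here is a self-contained
sketch of what ``ShortRepository.resolve()`` does (simplified: the real
code goes through ``templater.engine`` and raises ``error.Abort`` on
malformed URLs, and this version omits handling for URLs with fewer
parts than the template references)::

    import re

    _partre = re.compile(r'\{(\d+)\}')

    def expand(template, url):
        # how many {N} placeholders does the template reference?
        try:
            nparts = max(int(n) for n in _partre.findall(template))
        except ValueError:
            nparts = 0
        tail = url.split('://', 1)[1]
        parts = tail.split('/', nparts)
        if len(parts) > nparts:
            parts, rest = parts[:-1], parts[-1]
        else:
            rest = ''
        filled = _partre.sub(lambda m: parts[int(m.group(1)) - 1], template)
        return filled + rest

    # expand('https://{1}.googlecode.com/hg/', 'gcode://myproj/trunk')
    # -> 'https://myproj.googlecode.com/hg/trunk'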
@@ -1,623 +1,623 @@ mercurial/bundlerepo.py
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import os
17 17 import shutil
18 18 import tempfile
19 19
20 20 from .i18n import _
21 21 from .node import nullid
22 22
23 23 from . import (
24 24 bundle2,
25 25 changegroup,
26 26 changelog,
27 27 cmdutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 filelog,
32 32 localrepo,
33 33 manifest,
34 34 mdiff,
35 35 node as nodemod,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 revlog,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43
44 44 class bundlerevlog(revlog.revlog):
45 45 def __init__(self, opener, indexfile, cgunpacker, linkmapper):
46 46 # How it works:
47 47 # To retrieve a revision, we need to know the offset of the revision in
48 48 # the bundle (an unbundle object). We store this offset in the index
49 49 # (start). The base of the delta is stored in the base field.
50 50 #
51 51 # To differentiate a rev in the bundle from a rev in the revlog, we
52 52 # check revision against repotiprev.
53 53 opener = vfsmod.readonlyvfs(opener)
54 54 revlog.revlog.__init__(self, opener, indexfile)
55 55 self.bundle = cgunpacker
56 56 n = len(self)
57 57 self.repotiprev = n - 1
58 58 self.bundlerevs = set() # used by 'bundle()' revset expression
59 59 for deltadata in cgunpacker.deltaiter():
60 60 node, p1, p2, cs, deltabase, delta, flags = deltadata
61 61
62 62 size = len(delta)
63 63 start = cgunpacker.tell() - size
64 64
65 65 link = linkmapper(cs)
66 66 if node in self.nodemap:
67 67 # this can happen if two branches make the same change
68 68 self.bundlerevs.add(self.nodemap[node])
69 69 continue
70 70
71 71 for p in (p1, p2):
72 72 if p not in self.nodemap:
73 73 raise error.LookupError(p, self.indexfile,
74 74 _("unknown parent"))
75 75
76 76 if deltabase not in self.nodemap:
77 77 raise error.LookupError(deltabase, self.indexfile,
78 78 _('unknown delta base'))
79 79
80 80 baserev = self.rev(deltabase)
81 81 # start, size, full unc. size, base (unused), link, p1, p2, node
82 82 e = (revlog.offset_type(start, flags), size, -1, baserev, link,
83 83 self.rev(p1), self.rev(p2), node)
84 84 self.index.insert(-1, e)
85 85 self.nodemap[node] = n
86 86 self.bundlerevs.add(n)
87 87 n += 1
88 88
89 89 def _chunk(self, rev, df=None):
90 90 # Warning: in case of bundle, the diff is against what we stored as
91 91 # delta base, not against rev - 1
92 92 # XXX: could use some caching
93 93 if rev <= self.repotiprev:
94 94 return revlog.revlog._chunk(self, rev)
95 95 self.bundle.seek(self.start(rev))
96 96 return self.bundle.read(self.length(rev))
97 97
98 98 def revdiff(self, rev1, rev2):
99 99 """return or calculate a delta between two revisions"""
100 100 if rev1 > self.repotiprev and rev2 > self.repotiprev:
101 101 # hot path for bundle
102 102 revb = self.index[rev2][3]
103 103 if revb == rev1:
104 104 return self._chunk(rev2)
105 105 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
106 106 return revlog.revlog.revdiff(self, rev1, rev2)
107 107
108 108 return mdiff.textdiff(self.revision(rev1, raw=True),
109 109 self.revision(rev2, raw=True))
110 110
111 111 def revision(self, nodeorrev, _df=None, raw=False):
112 112 """return an uncompressed revision of a given node or revision
113 113 number.
114 114 """
115 115 if isinstance(nodeorrev, int):
116 116 rev = nodeorrev
117 117 node = self.node(rev)
118 118 else:
119 119 node = nodeorrev
120 120 rev = self.rev(node)
121 121
122 122 if node == nullid:
123 123 return ""
124 124
125 125 rawtext = None
126 126 chain = []
127 127 iterrev = rev
128 128 # reconstruct the revision if it is from a changegroup
129 129 while iterrev > self.repotiprev:
130 130 if self._cache and self._cache[1] == iterrev:
131 131 rawtext = self._cache[2]
132 132 break
133 133 chain.append(iterrev)
134 134 iterrev = self.index[iterrev][3]
135 135 if rawtext is None:
136 136 rawtext = self.baserevision(iterrev)
137 137
138 138 while chain:
139 139 delta = self._chunk(chain.pop())
140 140 rawtext = mdiff.patches(rawtext, [delta])
141 141
142 142 text, validatehash = self._processflags(rawtext, self.flags(rev),
143 143 'read', raw=raw)
144 144 if validatehash:
145 145 self.checkhash(text, node, rev=rev)
146 146 self._cache = (node, rev, rawtext)
147 147 return text
148 148
149 149 def baserevision(self, nodeorrev):
150 150 # Revlog subclasses may override the 'revision' method to modify the format
151 151 # of the content retrieved from the revlog. To use bundlerevlog with such a
152 152 # class, one needs to override 'baserevision' and make a more specific call here.
153 153 return revlog.revlog.revision(self, nodeorrev, raw=True)
154 154
155 155 def addrevision(self, *args, **kwargs):
156 156 raise NotImplementedError
157 157
158 158 def addgroup(self, *args, **kwargs):
159 159 raise NotImplementedError
160 160
161 161 def strip(self, *args, **kwargs):
162 162 raise NotImplementedError
163 163
164 164 def checksize(self):
165 165 raise NotImplementedError
166 166
167 167 class bundlechangelog(bundlerevlog, changelog.changelog):
168 168 def __init__(self, opener, cgunpacker):
169 169 changelog.changelog.__init__(self, opener)
170 170 linkmapper = lambda x: x
171 171 bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
172 172 linkmapper)
173 173
174 174 def baserevision(self, nodeorrev):
175 175 # Although changelog doesn't override 'revision' method, some extensions
176 176 # may replace this class with another that does. Same story with
177 177 # manifest and filelog classes.
178 178
179 179 # This bypasses filtering on changelog.node() and rev() because we need
180 180 # revision text of the bundle base even if it is hidden.
181 181 oldfilter = self.filteredrevs
182 182 try:
183 183 self.filteredrevs = ()
184 184 return changelog.changelog.revision(self, nodeorrev, raw=True)
185 185 finally:
186 186 self.filteredrevs = oldfilter
187 187
188 188 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
189 189 def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
190 190 dir=''):
191 191 manifest.manifestrevlog.__init__(self, opener, dir=dir)
192 192 bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
193 193 linkmapper)
194 194 if dirlogstarts is None:
195 195 dirlogstarts = {}
196 196 if self.bundle.version == "03":
197 197 dirlogstarts = _getfilestarts(self.bundle)
198 198 self._dirlogstarts = dirlogstarts
199 199 self._linkmapper = linkmapper
200 200
201 201 def baserevision(self, nodeorrev):
202 202 node = nodeorrev
203 203 if isinstance(node, int):
204 204 node = self.node(node)
205 205
206 206 if node in self.fulltextcache:
207 207 result = '%s' % self.fulltextcache[node]
208 208 else:
209 209 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
210 210 return result
211 211
212 212 def dirlog(self, d):
213 213 if d in self._dirlogstarts:
214 214 self.bundle.seek(self._dirlogstarts[d])
215 215 return bundlemanifest(
216 216 self.opener, self.bundle, self._linkmapper,
217 217 self._dirlogstarts, dir=d)
218 218 return super(bundlemanifest, self).dirlog(d)
219 219
220 220 class bundlefilelog(filelog.filelog):
221 221 def __init__(self, opener, path, cgunpacker, linkmapper):
222 222 filelog.filelog.__init__(self, opener, path)
223 223 self._revlog = bundlerevlog(opener, self.indexfile,
224 224 cgunpacker, linkmapper)
225 225
226 226 def baserevision(self, nodeorrev):
227 227 return filelog.filelog.revision(self, nodeorrev, raw=True)
228 228
229 229 class bundlepeer(localrepo.localpeer):
230 230 def canpush(self):
231 231 return False
232 232
233 233 class bundlephasecache(phases.phasecache):
234 234 def __init__(self, *args, **kwargs):
235 235 super(bundlephasecache, self).__init__(*args, **kwargs)
236 236 if util.safehasattr(self, 'opener'):
237 237 self.opener = vfsmod.readonlyvfs(self.opener)
238 238
239 239 def write(self):
240 240 raise NotImplementedError
241 241
242 242 def _write(self, fp):
243 243 raise NotImplementedError
244 244
245 245 def _updateroots(self, phase, newroots, tr):
246 246 self.phaseroots[phase] = newroots
247 247 self.invalidate()
248 248 self.dirty = True
249 249
250 250 def _getfilestarts(cgunpacker):
251 251 filespos = {}
252 252 for chunkdata in iter(cgunpacker.filelogheader, {}):
253 253 fname = chunkdata['filename']
254 254 filespos[fname] = cgunpacker.tell()
255 255 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
256 256 pass
257 257 return filespos
258 258
259 259 class bundlerepository(localrepo.localrepository):
260 260 """A repository instance that is a union of a local repo and a bundle.
261 261
262 262 Instances represent a read-only repository composed of a local repository
263 263 with the contents of a bundle file applied. The repository instance is
264 264 conceptually similar to the state of a repository after an
265 265 ``hg unbundle`` operation. However, the contents of the bundle are never
266 266 applied to the actual base repository.
267 267 """
268 268 def __init__(self, ui, repopath, bundlepath):
269 269 self._tempparent = None
270 270 try:
271 271 localrepo.localrepository.__init__(self, ui, repopath)
272 272 except error.RepoError:
273 273 self._tempparent = tempfile.mkdtemp()
274 274 localrepo.instance(ui, self._tempparent, 1)
275 275 localrepo.localrepository.__init__(self, ui, self._tempparent)
276 276 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
277 277
278 278 if repopath:
279 279 self._url = 'bundle:' + util.expandpath(repopath) + '+' + bundlepath
280 280 else:
281 281 self._url = 'bundle:' + bundlepath
282 282
283 283 self.tempfile = None
284 284 f = util.posixfile(bundlepath, "rb")
285 285 bundle = exchange.readbundle(ui, f, bundlepath)
286 286
287 287 if isinstance(bundle, bundle2.unbundle20):
288 288 self._bundlefile = bundle
289 289 self._cgunpacker = None
290 290
291 291 cgpart = None
292 292 for part in bundle.iterparts(seekable=True):
293 293 if part.type == 'changegroup':
294 294 if cgpart:
295 295 raise NotImplementedError("can't process "
296 296 "multiple changegroups")
297 297 cgpart = part
298 298
299 299 self._handlebundle2part(bundle, part)
300 300
301 301 if not cgpart:
302 302 raise error.Abort(_("No changegroups found"))
303 303
304 304 # This is required to placate a later consumer, which expects
305 305 # the payload offset to be at the beginning of the changegroup.
306 306 # We need to do this after the iterparts() generator advances
307 307 # because iterparts() will seek to end of payload after the
308 308 # generator returns control to iterparts().
309 309 cgpart.seek(0, os.SEEK_SET)
310 310
311 311 elif isinstance(bundle, changegroup.cg1unpacker):
312 312 if bundle.compressed():
313 313 f = self._writetempbundle(bundle.read, '.hg10un',
314 314 header='HG10UN')
315 315 bundle = exchange.readbundle(ui, f, bundlepath, self.vfs)
316 316
317 317 self._bundlefile = bundle
318 318 self._cgunpacker = bundle
319 319 else:
320 320 raise error.Abort(_('bundle type %s cannot be read') %
321 321 type(bundle))
322 322
323 323 # dict with the mapping 'filename' -> position in the changegroup.
324 324 self._cgfilespos = {}
325 325
326 326 self.firstnewrev = self.changelog.repotiprev + 1
327 327 phases.retractboundary(self, None, phases.draft,
328 328 [ctx.node() for ctx in self[self.firstnewrev:]])
329 329
330 330 def _handlebundle2part(self, bundle, part):
331 331 if part.type != 'changegroup':
332 332 return
333 333
334 334 cgstream = part
335 335 version = part.params.get('version', '01')
336 336 legalcgvers = changegroup.supportedincomingversions(self)
337 337 if version not in legalcgvers:
338 338 msg = _('Unsupported changegroup version: %s')
339 339 raise error.Abort(msg % version)
340 340 if bundle.compressed():
341 341 cgstream = self._writetempbundle(part.read, '.cg%sun' % version)
342 342
343 343 self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')
344 344
345 345 def _writetempbundle(self, readfn, suffix, header=''):
346 346 """Write a temporary file to disk
347 347 """
348 348 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
349 349 suffix=suffix)
350 350 self.tempfile = temp
351 351
352 352 with os.fdopen(fdtemp, r'wb') as fptemp:
353 353 fptemp.write(header)
354 354 while True:
355 355 chunk = readfn(2**18)
356 356 if not chunk:
357 357 break
358 358 fptemp.write(chunk)
359 359
360 360 return self.vfs.open(self.tempfile, mode="rb")
361 361
362 362 @localrepo.unfilteredpropertycache
363 363 def _phasecache(self):
364 364 return bundlephasecache(self, self._phasedefaults)
365 365
366 366 @localrepo.unfilteredpropertycache
367 367 def changelog(self):
368 368 # consume the header if it exists
369 369 self._cgunpacker.changelogheader()
370 370 c = bundlechangelog(self.svfs, self._cgunpacker)
371 371 self.manstart = self._cgunpacker.tell()
372 372 return c
373 373
374 374 def _constructmanifest(self):
375 375 self._cgunpacker.seek(self.manstart)
376 376 # consume the header if it exists
377 377 self._cgunpacker.manifestheader()
378 378 linkmapper = self.unfiltered().changelog.rev
379 379 m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
380 380 self.filestart = self._cgunpacker.tell()
381 381 return m
382 382
383 383 def _consumemanifest(self):
384 384 """Consumes the manifest portion of the bundle, setting filestart so the
385 385 file portion can be read."""
386 386 self._cgunpacker.seek(self.manstart)
387 387 self._cgunpacker.manifestheader()
388 388 for delta in self._cgunpacker.deltaiter():
389 389 pass
390 390 self.filestart = self._cgunpacker.tell()
391 391
392 392 @localrepo.unfilteredpropertycache
393 393 def manstart(self):
394 394 self.changelog
395 395 return self.manstart
396 396
397 397 @localrepo.unfilteredpropertycache
398 398 def filestart(self):
399 399 self.manifestlog
400 400
401 401 # If filestart was not set by self.manifestlog, that means the
402 402 # manifestlog implementation did not consume the manifests from the
403 403 # changegroup (ex: it might be consuming trees from a separate bundle2
404 404 # part instead). So we need to manually consume it.
405 405 if r'filestart' not in self.__dict__:
406 406 self._consumemanifest()
407 407
408 408 return self.filestart
409 409
410 410 def url(self):
411 411 return self._url
412 412
413 413 def file(self, f):
414 414 if not self._cgfilespos:
415 415 self._cgunpacker.seek(self.filestart)
416 416 self._cgfilespos = _getfilestarts(self._cgunpacker)
417 417
418 418 if f in self._cgfilespos:
419 419 self._cgunpacker.seek(self._cgfilespos[f])
420 420 linkmapper = self.unfiltered().changelog.rev
421 421 return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
422 422 else:
423 423 return super(bundlerepository, self).file(f)
424 424
425 425 def close(self):
426 426 """Close assigned bundle file immediately."""
427 427 self._bundlefile.close()
428 428 if self.tempfile is not None:
429 429 self.vfs.unlink(self.tempfile)
430 430 if self._tempparent:
431 431 shutil.rmtree(self._tempparent, True)
432 432
433 433 def cancopy(self):
434 434 return False
435 435
436 436 def peer(self):
437 437 return bundlepeer(self)
438 438
439 439 def getcwd(self):
440 440 return pycompat.getcwd() # always outside the repo
441 441
442 442 # Check if parents exist in localrepo before setting
443 443 def setparents(self, p1, p2=nullid):
444 444 p1rev = self.changelog.rev(p1)
445 445 p2rev = self.changelog.rev(p2)
446 446 msg = _("setting parent to node %s that only exists in the bundle\n")
447 447 if self.changelog.repotiprev < p1rev:
448 448 self.ui.warn(msg % nodemod.hex(p1))
449 449 if self.changelog.repotiprev < p2rev:
450 450 self.ui.warn(msg % nodemod.hex(p2))
451 451 return super(bundlerepository, self).setparents(p1, p2)
452 452
453 def instance(ui, path, create):
453 def instance(ui, path, create, intents=None):
454 454 if create:
455 455 raise error.Abort(_('cannot create new bundle repository'))
456 456 # internal config: bundle.mainreporoot
457 457 parentpath = ui.config("bundle", "mainreporoot")
458 458 if not parentpath:
459 459 # try to find the correct path to the working directory repo
460 460 parentpath = cmdutil.findrepo(pycompat.getcwd())
461 461 if parentpath is None:
462 462 parentpath = ''
463 463 if parentpath:
464 464 # Try to make the full path relative so we get a nice, short URL.
465 465 # In particular, we don't want temp dir names in test outputs.
466 466 cwd = pycompat.getcwd()
467 467 if parentpath == cwd:
468 468 parentpath = ''
469 469 else:
470 470 cwd = pathutil.normasprefix(cwd)
471 471 if parentpath.startswith(cwd):
472 472 parentpath = parentpath[len(cwd):]
473 473 u = util.url(path)
474 474 path = u.localpath()
475 475 if u.scheme == 'bundle':
476 476 s = path.split("+", 1)
477 477 if len(s) == 1:
478 478 repopath, bundlename = parentpath, s[0]
479 479 else:
480 480 repopath, bundlename = s
481 481 else:
482 482 repopath, bundlename = parentpath, path
483 483 return bundlerepository(ui, repopath, bundlename)
484 484
485 485 class bundletransactionmanager(object):
486 486 def transaction(self):
487 487 return None
488 488
489 489 def close(self):
490 490 raise NotImplementedError
491 491
492 492 def release(self):
493 493 raise NotImplementedError
494 494
495 495 def getremotechanges(ui, repo, peer, onlyheads=None, bundlename=None,
496 496 force=False):
497 497 '''obtains a bundle of changes incoming from peer
498 498
499 499 "onlyheads" restricts the returned changes to those reachable from the
500 500 specified heads.
501 501 "bundlename", if given, stores the bundle to this file path permanently;
502 502 otherwise it's stored to a temp file and gets deleted again when you call
503 503 the returned "cleanupfn".
504 504 "force" indicates whether to proceed on unrelated repos.
505 505
506 506 Returns a tuple (local, csets, cleanupfn):
507 507
508 508 "local" is a local repo from which to obtain the actual incoming
509 509 changesets; it is a bundlerepo for the obtained bundle when the
510 510 original "peer" is remote.
511 511 "csets" lists the incoming changeset node ids.
512 512 "cleanupfn" must be called without arguments when you're done processing
513 513 the changes; it closes both the original "peer" and the one returned
514 514 here.
515 515 '''
516 516 tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads,
517 517 force=force)
518 518 common, incoming, rheads = tmp
519 519 if not incoming:
520 520 try:
521 521 if bundlename:
522 522 os.unlink(bundlename)
523 523 except OSError:
524 524 pass
525 525 return repo, [], peer.close
526 526
527 527 commonset = set(common)
528 528 rheads = [x for x in rheads if x not in commonset]
529 529
530 530 bundle = None
531 531 bundlerepo = None
532 532 localrepo = peer.local()
533 533 if bundlename or not localrepo:
534 534 # create a bundle (uncompressed if peer repo is not local)
535 535
536 536 # developer config: devel.legacy.exchange
537 537 legexc = ui.configlist('devel', 'legacy.exchange')
538 538 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
539 539 canbundle2 = (not forcebundle1
540 540 and peer.capable('getbundle')
541 541 and peer.capable('bundle2'))
542 542 if canbundle2:
543 543 with peer.commandexecutor() as e:
544 544 b2 = e.callcommand('getbundle', {
545 545 'source': 'incoming',
546 546 'common': common,
547 547 'heads': rheads,
548 548 'bundlecaps': exchange.caps20to10(repo, role='client'),
549 549 'cg': True,
550 550 }).result()
551 551
552 552 fname = bundle = changegroup.writechunks(ui,
553 553 b2._forwardchunks(),
554 554 bundlename)
555 555 else:
556 556 if peer.capable('getbundle'):
557 557 with peer.commandexecutor() as e:
558 558 cg = e.callcommand('getbundle', {
559 559 'source': 'incoming',
560 560 'common': common,
561 561 'heads': rheads,
562 562 }).result()
563 563 elif onlyheads is None and not peer.capable('changegroupsubset'):
564 564 # compat with older servers when pulling all remote heads
565 565
566 566 with peer.commandexecutor() as e:
567 567 cg = e.callcommand('changegroup', {
568 568 'nodes': incoming,
569 569 'source': 'incoming',
570 570 }).result()
571 571
572 572 rheads = None
573 573 else:
574 574 with peer.commandexecutor() as e:
575 575 cg = e.callcommand('changegroupsubset', {
576 576 'bases': incoming,
577 577 'heads': rheads,
578 578 'source': 'incoming',
579 579 }).result()
580 580
581 581 if localrepo:
582 582 bundletype = "HG10BZ"
583 583 else:
584 584 bundletype = "HG10UN"
585 585 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
586 586 bundletype)
587 587 # keep written bundle?
588 588 if bundlename:
589 589 bundle = None
590 590 if not localrepo:
591 591 # use the created uncompressed bundlerepo
592 592 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
593 593 fname)
594 594 # this repo contains local and peer now, so filter out local again
595 595 common = repo.heads()
596 596 if localrepo:
597 597 # Part of common may be remotely filtered
598 598 # So use an unfiltered version
599 599 # The discovery process probably need cleanup to avoid that
600 600 localrepo = localrepo.unfiltered()
601 601
602 602 csets = localrepo.changelog.findmissing(common, rheads)
603 603
604 604 if bundlerepo:
605 605 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
606 606
607 607 with peer.commandexecutor() as e:
608 608 remotephases = e.callcommand('listkeys', {
609 609 'namespace': 'phases',
610 610 }).result()
611 611
612 612 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
613 613 pullop.trmanager = bundletransactionmanager()
614 614 exchange._pullapplyphases(pullop, remotephases)
615 615
616 616 def cleanup():
617 617 if bundlerepo:
618 618 bundlerepo.close()
619 619 if bundle:
620 620 os.unlink(bundle)
621 621 peer.close()
622 622
623 623 return (localrepo, csets, cleanup)
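The module's ``instance()`` (which now also takes ``intents=None``)
accepts either a bare bundle filename or a ``bundle:`` URL that pairs a
base repository path with a bundle path. A hedged sketch of just the
path-splitting step, leaving out the cwd-relative cleanup and
``util.url`` parsing the real code performs::

    def parsebundlepath(path, parentpath=''):
        # 'bundle:/repo+/tmp/x.hg' -> ('/repo', '/tmp/x.hg')
        # 'bundle:/tmp/x.hg'       -> (parentpath, '/tmp/x.hg')
        # '/tmp/x.hg'              -> (parentpath, '/tmp/x.hg')
        if path.startswith('bundle:'):
            rest = path[len('bundle:'):]
            s = rest.split('+', 1)
            if len(s) == 1:
                return parentpath, s[0]
            return s[0], s[1]
        return parentpath, path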
@@ -1,1052 +1,1053 @@ mercurial/dispatch.py
1 1 # dispatch.py - command dispatching for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import, print_function
9 9
10 10 import difflib
11 11 import errno
12 12 import getopt
13 13 import os
14 14 import pdb
15 15 import re
16 16 import signal
17 17 import sys
18 18 import time
19 19 import traceback
20 20
21 21
22 22 from .i18n import _
23 23
24 24 from . import (
25 25 cmdutil,
26 26 color,
27 27 commands,
28 28 demandimport,
29 29 encoding,
30 30 error,
31 31 extensions,
32 32 fancyopts,
33 33 help,
34 34 hg,
35 35 hook,
36 36 profiling,
37 37 pycompat,
38 38 scmutil,
39 39 ui as uimod,
40 40 util,
41 41 )
42 42
43 43 from .utils import (
44 44 procutil,
45 45 stringutil,
46 46 )
47 47
48 48 class request(object):
49 49 def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
50 50 ferr=None, prereposetups=None):
51 51 self.args = args
52 52 self.ui = ui
53 53 self.repo = repo
54 54
55 55 # input/output/error streams
56 56 self.fin = fin
57 57 self.fout = fout
58 58 self.ferr = ferr
59 59
60 60 # remember options pre-parsed by _earlyparseopts()
61 61 self.earlyoptions = {}
62 62
63 63 # reposetups which run before extensions, useful for chg to pre-fill
64 64 # low-level repo state (for example, changelog) before extensions.
65 65 self.prereposetups = prereposetups or []
66 66
67 67 def _runexithandlers(self):
68 68 exc = None
69 69 handlers = self.ui._exithandlers
70 70 try:
71 71 while handlers:
72 72 func, args, kwargs = handlers.pop()
73 73 try:
74 74 func(*args, **kwargs)
75 75 except: # re-raises below
76 76 if exc is None:
77 77 exc = sys.exc_info()[1]
78 78 self.ui.warn(('error in exit handlers:\n'))
79 79 self.ui.traceback(force=True)
80 80 finally:
81 81 if exc is not None:
82 82 raise exc
83 83
84 84 def run():
85 85 "run the command in sys.argv"
86 86 _initstdio()
87 87 req = request(pycompat.sysargv[1:])
88 88 err = None
89 89 try:
90 90 status = (dispatch(req) or 0)
91 91 except error.StdioError as e:
92 92 err = e
93 93 status = -1
94 94 if util.safehasattr(req.ui, 'fout'):
95 95 try:
96 96 req.ui.fout.flush()
97 97 except IOError as e:
98 98 err = e
99 99 status = -1
100 100 if util.safehasattr(req.ui, 'ferr'):
101 101 try:
102 102 if err is not None and err.errno != errno.EPIPE:
103 103 req.ui.ferr.write('abort: %s\n' %
104 104 encoding.strtolocal(err.strerror))
105 105 req.ui.ferr.flush()
106 106 # There's not much we can do about an I/O error here. So (possibly)
107 107 # change the status code and move on.
108 108 except IOError:
109 109 status = -1
110 110
111 111 _silencestdio()
112 112 sys.exit(status & 255)
113 113
114 114 if pycompat.ispy3:
115 115 def _initstdio():
116 116 pass
117 117
118 118 def _silencestdio():
119 119 for fp in (sys.stdout, sys.stderr):
120 120 # Check if the file is okay
121 121 try:
122 122 fp.flush()
123 123 continue
124 124 except IOError:
125 125 pass
126 126 # Otherwise mark it as closed to silence "Exception ignored in"
127 127 # message emitted by the interpreter finalizer. Be careful to
128 128 # not close procutil.stdout, which may be a fdopen-ed file object
129 129 # and its close() actually closes the underlying file descriptor.
130 130 try:
131 131 fp.close()
132 132 except IOError:
133 133 pass
134 134 else:
135 135 def _initstdio():
136 136 for fp in (sys.stdin, sys.stdout, sys.stderr):
137 137 procutil.setbinary(fp)
138 138
139 139 def _silencestdio():
140 140 pass
141 141
142 142 def _getsimilar(symbols, value):
143 143 sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
144 144 # The cutoff for similarity here is pretty arbitrary. It should
145 145 # probably be investigated and tweaked.
146 146 return [s for s in symbols if sim(s) > 0.6]
147 147
148 148 def _reportsimilar(write, similar):
149 149 if len(similar) == 1:
150 150 write(_("(did you mean %s?)\n") % similar[0])
151 151 elif similar:
152 152 ss = ", ".join(sorted(similar))
153 153 write(_("(did you mean one of %s?)\n") % ss)
154 154
155 155 def _formatparse(write, inst):
156 156 similar = []
157 157 if isinstance(inst, error.UnknownIdentifier):
158 158 # make sure to check fileset first, as revset can invoke fileset
159 159 similar = _getsimilar(inst.symbols, inst.function)
160 160 if len(inst.args) > 1:
161 161 write(_("hg: parse error at %s: %s\n") %
162 162 (pycompat.bytestr(inst.args[1]), inst.args[0]))
163 163 if inst.args[0].startswith(' '):
164 164 write(_("unexpected leading whitespace\n"))
165 165 else:
166 166 write(_("hg: parse error: %s\n") % inst.args[0])
167 167 _reportsimilar(write, similar)
168 168 if inst.hint:
169 169 write(_("(%s)\n") % inst.hint)
170 170
171 171 def _formatargs(args):
172 172 return ' '.join(procutil.shellquote(a) for a in args)
173 173
174 174 def dispatch(req):
175 175 "run the command specified in req.args"
176 176 if req.ferr:
177 177 ferr = req.ferr
178 178 elif req.ui:
179 179 ferr = req.ui.ferr
180 180 else:
181 181 ferr = procutil.stderr
182 182
183 183 try:
184 184 if not req.ui:
185 185 req.ui = uimod.ui.load()
186 186 req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
187 187 if req.earlyoptions['traceback']:
188 188 req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
189 189
190 190 # set ui streams from the request
191 191 if req.fin:
192 192 req.ui.fin = req.fin
193 193 if req.fout:
194 194 req.ui.fout = req.fout
195 195 if req.ferr:
196 196 req.ui.ferr = req.ferr
197 197 except error.Abort as inst:
198 198 ferr.write(_("abort: %s\n") % inst)
199 199 if inst.hint:
200 200 ferr.write(_("(%s)\n") % inst.hint)
201 201 return -1
202 202 except error.ParseError as inst:
203 203 _formatparse(ferr.write, inst)
204 204 return -1
205 205
206 206 msg = _formatargs(req.args)
207 207 starttime = util.timer()
208 208 ret = None
209 209 try:
210 210 ret = _runcatch(req)
211 211 except error.ProgrammingError as inst:
212 212 req.ui.warn(_('** ProgrammingError: %s\n') % inst)
213 213 if inst.hint:
214 214 req.ui.warn(_('** (%s)\n') % inst.hint)
215 215 raise
216 216 except KeyboardInterrupt as inst:
217 217 try:
218 218 if isinstance(inst, error.SignalInterrupt):
219 219 msg = _("killed!\n")
220 220 else:
221 221 msg = _("interrupted!\n")
222 222 req.ui.warn(msg)
223 223 except error.SignalInterrupt:
224 224 # maybe pager would quit without consuming all the output, and
225 225 # SIGPIPE was raised. we cannot print anything in this case.
226 226 pass
227 227 except IOError as inst:
228 228 if inst.errno != errno.EPIPE:
229 229 raise
230 230 ret = -1
231 231 finally:
232 232 duration = util.timer() - starttime
233 233 req.ui.flush()
234 234 if req.ui.logblockedtimes:
235 235 req.ui._blockedtimes['command_duration'] = duration * 1000
236 236 req.ui.log('uiblocked', 'ui blocked ms',
237 237 **pycompat.strkwargs(req.ui._blockedtimes))
238 238 req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
239 239 msg, ret or 0, duration)
240 240 try:
241 241 req._runexithandlers()
242 242 except: # exiting, so no re-raises
243 243 ret = ret or -1
244 244 return ret
245 245
246 246 def _runcatch(req):
247 247 def catchterm(*args):
248 248 raise error.SignalInterrupt
249 249
250 250 ui = req.ui
251 251 try:
252 252 for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
253 253 num = getattr(signal, name, None)
254 254 if num:
255 255 signal.signal(num, catchterm)
256 256 except ValueError:
257 257 pass # happens if called in a thread
258 258
259 259 def _runcatchfunc():
260 260 realcmd = None
261 261 try:
262 262 cmdargs = fancyopts.fancyopts(req.args[:], commands.globalopts, {})
263 263 cmd = cmdargs[0]
264 264 aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
265 265 realcmd = aliases[0]
266 266 except (error.UnknownCommand, error.AmbiguousCommand,
267 267 IndexError, getopt.GetoptError):
268 268 # Don't handle this here. We know the command is
269 269 # invalid, but all we're worried about for now is that
270 270 # it's not a command that server operators expect to
271 271 # be safe to offer to users in a sandbox.
272 272 pass
273 273 if realcmd == 'serve' and '--stdio' in cmdargs:
274 274 # We want to constrain 'hg serve --stdio' instances pretty
275 275 # closely, as many shared-ssh access tools want to grant
276 276 # access to run *only* 'hg -R $repo serve --stdio'. We
277 277 # restrict to exactly that set of arguments, and prohibit
278 278 # any repo name that starts with '--' to prevent
279 279 # shenanigans wherein a user does something like pass
280 280 # --debugger or --config=ui.debugger=1 as a repo
281 281 # name. This used to actually run the debugger.
282 282 if (len(req.args) != 4 or
283 283 req.args[0] != '-R' or
284 284 req.args[1].startswith('--') or
285 285 req.args[2] != 'serve' or
286 286 req.args[3] != '--stdio'):
287 287 raise error.Abort(
288 288 _('potentially unsafe serve --stdio invocation: %r') %
289 289 (req.args,))
290 290
291 291 try:
292 292 debugger = 'pdb'
293 293 debugtrace = {
294 294 'pdb': pdb.set_trace
295 295 }
296 296 debugmortem = {
297 297 'pdb': pdb.post_mortem
298 298 }
299 299
300 300 # read --config before doing anything else
301 301 # (e.g. to change trust settings for reading .hg/hgrc)
302 302 cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
303 303
304 304 if req.repo:
305 305 # copy configs that were passed on the cmdline (--config) to
306 306 # the repo ui
307 307 for sec, name, val in cfgs:
308 308 req.repo.ui.setconfig(sec, name, val, source='--config')
309 309
310 310 # developer config: ui.debugger
311 311 debugger = ui.config("ui", "debugger")
312 312 debugmod = pdb
313 313 if not debugger or ui.plain():
314 314 # if we are in HGPLAIN mode, then disable custom debugging
315 315 debugger = 'pdb'
316 316 elif req.earlyoptions['debugger']:
317 317 # This import can be slow for fancy debuggers, so only
318 318 # do it when absolutely necessary, i.e. when actual
319 319 # debugging has been requested
320 320 with demandimport.deactivated():
321 321 try:
322 322 debugmod = __import__(debugger)
323 323 except ImportError:
324 324 pass # Leave debugmod = pdb
325 325
326 326 debugtrace[debugger] = debugmod.set_trace
327 327 debugmortem[debugger] = debugmod.post_mortem
328 328
329 329 # enter the debugger before command execution
330 330 if req.earlyoptions['debugger']:
331 331 ui.warn(_("entering debugger - "
332 332 "type c to continue starting hg or h for help\n"))
333 333
334 334 if (debugger != 'pdb' and
335 335 debugtrace[debugger] == debugtrace['pdb']):
336 336 ui.warn(_("%s debugger specified "
337 337 "but its module was not found\n") % debugger)
338 338 with demandimport.deactivated():
339 339 debugtrace[debugger]()
340 340 try:
341 341 return _dispatch(req)
342 342 finally:
343 343 ui.flush()
344 344 except: # re-raises
345 345 # enter the debugger when we hit an exception
346 346 if req.earlyoptions['debugger']:
347 347 traceback.print_exc()
348 348 debugmortem[debugger](sys.exc_info()[2])
349 349 raise
350 350
351 351 return _callcatch(ui, _runcatchfunc)
352 352
353 353 def _callcatch(ui, func):
354 354 """like scmutil.callcatch but handles more high-level exceptions about
355 355 config parsing and commands. besides, use handlecommandexception to handle
356 356 uncaught exceptions.
357 357 """
358 358 try:
359 359 return scmutil.callcatch(ui, func)
360 360 except error.AmbiguousCommand as inst:
361 361 ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
362 362 (inst.args[0], " ".join(inst.args[1])))
363 363 except error.CommandError as inst:
364 364 if inst.args[0]:
365 365 ui.pager('help')
366 366 msgbytes = pycompat.bytestr(inst.args[1])
367 367 ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes))
368 368 commands.help_(ui, inst.args[0], full=False, command=True)
369 369 else:
370 370 ui.pager('help')
371 371 ui.warn(_("hg: %s\n") % inst.args[1])
372 372 commands.help_(ui, 'shortlist')
373 373 except error.ParseError as inst:
374 374 _formatparse(ui.warn, inst)
375 375 return -1
376 376 except error.UnknownCommand as inst:
377 377 nocmdmsg = _("hg: unknown command '%s'\n") % inst.args[0]
378 378 try:
379 379 # check if the command is in a disabled extension
380 380 # (but don't check for extensions themselves)
381 381 formatted = help.formattedhelp(ui, commands, inst.args[0],
382 382 unknowncmd=True)
383 383 ui.warn(nocmdmsg)
384 384 ui.write(formatted)
385 385 except (error.UnknownCommand, error.Abort):
386 386 suggested = False
387 387 if len(inst.args) == 2:
388 388 sim = _getsimilar(inst.args[1], inst.args[0])
389 389 if sim:
390 390 ui.warn(nocmdmsg)
391 391 _reportsimilar(ui.warn, sim)
392 392 suggested = True
393 393 if not suggested:
394 394 ui.pager('help')
395 395 ui.warn(nocmdmsg)
396 396 commands.help_(ui, 'shortlist')
397 397 except IOError:
398 398 raise
399 399 except KeyboardInterrupt:
400 400 raise
401 401 except: # probably re-raises
402 402 if not handlecommandexception(ui):
403 403 raise
404 404
405 405 return -1
406 406
407 407 def aliasargs(fn, givenargs):
408 408 args = []
409 409 # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
410 410 if not util.safehasattr(fn, '_origfunc'):
411 411 args = getattr(fn, 'args', args)
412 412 if args:
413 413 cmd = ' '.join(map(procutil.shellquote, args))
414 414
415 415 nums = []
416 416 def replacer(m):
417 417 num = int(m.group(1)) - 1
418 418 nums.append(num)
419 419 if num < len(givenargs):
420 420 return givenargs[num]
421 421 raise error.Abort(_('too few arguments for command alias'))
422 422 cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
423 423 givenargs = [x for i, x in enumerate(givenargs)
424 424 if i not in nums]
425 425 args = pycompat.shlexsplit(cmd)
426 426 return args + givenargs
427 427
428 428 def aliasinterpolate(name, args, cmd):
429 429 '''interpolate args into cmd for shell aliases
430 430
431 431 This also handles $0, $@ and "$@".
432 432 '''
433 433 # util.interpolate can't deal with "$@" (with quotes) because it's only
434 434 # built to match prefix + patterns.
435 435 replacemap = dict(('$%d' % (i + 1), arg) for i, arg in enumerate(args))
436 436 replacemap['$0'] = name
437 437 replacemap['$$'] = '$'
438 438 replacemap['$@'] = ' '.join(args)
439 439 # Typical Unix shells interpolate "$@" (with quotes) as all the positional
440 440 # parameters, separated out into words. Emulate the same behavior here by
441 441 # quoting the arguments individually. POSIX shells will then typically
442 442 # tokenize each argument into exactly one word.
443 443 replacemap['"$@"'] = ' '.join(procutil.shellquote(arg) for arg in args)
444 444 # escape '\$' for regex
445 445 regex = '|'.join(replacemap.keys()).replace('$', br'\$')
446 446 r = re.compile(regex)
447 447 return r.sub(lambda x: replacemap[x.group()], cmd)
448 448
449 449 class cmdalias(object):
450 450 def __init__(self, ui, name, definition, cmdtable, source):
451 451 self.name = self.cmd = name
452 452 self.cmdname = ''
453 453 self.definition = definition
454 454 self.fn = None
455 455 self.givenargs = []
456 456 self.opts = []
457 457 self.help = ''
458 458 self.badalias = None
459 459 self.unknowncmd = False
460 460 self.source = source
461 461
462 462 try:
463 463 aliases, entry = cmdutil.findcmd(self.name, cmdtable)
464 464 for alias, e in cmdtable.iteritems():
465 465 if e is entry:
466 466 self.cmd = alias
467 467 break
468 468 self.shadows = True
469 469 except error.UnknownCommand:
470 470 self.shadows = False
471 471
472 472 if not self.definition:
473 473 self.badalias = _("no definition for alias '%s'") % self.name
474 474 return
475 475
476 476 if self.definition.startswith('!'):
477 477 shdef = self.definition[1:]
478 478 self.shell = True
479 479 def fn(ui, *args):
480 480 env = {'HG_ARGS': ' '.join((self.name,) + args)}
481 481 def _checkvar(m):
482 482 if m.groups()[0] == '$':
483 483 return m.group()
484 484 elif int(m.groups()[0]) <= len(args):
485 485 return m.group()
486 486 else:
487 487 ui.debug("No argument found for substitution "
488 488 "of %i variable in alias '%s' definition.\n"
489 489 % (int(m.groups()[0]), self.name))
490 490 return ''
491 491 cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
492 492 cmd = aliasinterpolate(self.name, args, cmd)
493 493 return ui.system(cmd, environ=env,
494 494 blockedtag='alias_%s' % self.name)
495 495 self.fn = fn
496 496 self._populatehelp(ui, name, shdef, self.fn)
497 497 return
498 498
499 499 try:
500 500 args = pycompat.shlexsplit(self.definition)
501 501 except ValueError as inst:
502 502 self.badalias = (_("error in definition for alias '%s': %s")
503 503 % (self.name, stringutil.forcebytestr(inst)))
504 504 return
505 505 earlyopts, args = _earlysplitopts(args)
506 506 if earlyopts:
507 507 self.badalias = (_("error in definition for alias '%s': %s may "
508 508 "only be given on the command line")
509 509 % (self.name, '/'.join(pycompat.ziplist(*earlyopts)
510 510 [0])))
511 511 return
512 512 self.cmdname = cmd = args.pop(0)
513 513 self.givenargs = args
514 514
515 515 try:
516 516 tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
517 517 if len(tableentry) > 2:
518 518 self.fn, self.opts, cmdhelp = tableentry
519 519 else:
520 520 self.fn, self.opts = tableentry
521 521 cmdhelp = None
522 522
523 523 self._populatehelp(ui, name, cmd, self.fn, cmdhelp)
524 524
525 525 except error.UnknownCommand:
526 526 self.badalias = (_("alias '%s' resolves to unknown command '%s'")
527 527 % (self.name, cmd))
528 528 self.unknowncmd = True
529 529 except error.AmbiguousCommand:
530 530 self.badalias = (_("alias '%s' resolves to ambiguous command '%s'")
531 531 % (self.name, cmd))
532 532
533 533 def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
534 534 # confine strings to be passed to i18n.gettext()
535 535 cfg = {}
536 536 for k in ('doc', 'help'):
537 537 v = ui.config('alias', '%s:%s' % (name, k), None)
538 538 if v is None:
539 539 continue
540 540 if not encoding.isasciistr(v):
541 541 self.badalias = (_("non-ASCII character in alias definition "
542 542 "'%s:%s'") % (name, k))
543 543 return
544 544 cfg[k] = v
545 545
546 546 self.help = cfg.get('help', defaulthelp or '')
547 547 if self.help and self.help.startswith("hg " + cmd):
548 548 # drop prefix in old-style help lines so hg shows the alias
549 549 self.help = self.help[4 + len(cmd):]
550 550
551 551 doc = cfg.get('doc', pycompat.getdoc(fn))
552 552 if doc is not None:
553 553 doc = pycompat.sysstr(doc)
554 554 self.__doc__ = doc
555 555
556 556 @property
557 557 def args(self):
558 558 args = pycompat.maplist(util.expandpath, self.givenargs)
559 559 return aliasargs(self.fn, args)
560 560
561 561 def __getattr__(self, name):
562 562 adefaults = {r'norepo': True, r'intents': set(),
563 563 r'optionalrepo': False, r'inferrepo': False}
564 564 if name not in adefaults:
565 565 raise AttributeError(name)
566 566 if self.badalias or util.safehasattr(self, 'shell'):
567 567 return adefaults[name]
568 568 return getattr(self.fn, name)
569 569
570 570 def __call__(self, ui, *args, **opts):
571 571 if self.badalias:
572 572 hint = None
573 573 if self.unknowncmd:
574 574 try:
575 575 # check if the command is in a disabled extension
576 576 cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
577 577 hint = _("'%s' is provided by '%s' extension") % (cmd, ext)
578 578 except error.UnknownCommand:
579 579 pass
580 580 raise error.Abort(self.badalias, hint=hint)
581 581 if self.shadows:
582 582 ui.debug("alias '%s' shadows command '%s'\n" %
583 583 (self.name, self.cmdname))
584 584
585 585 ui.log('commandalias', "alias '%s' expands to '%s'\n",
586 586 self.name, self.definition)
587 587 if util.safehasattr(self, 'shell'):
588 588 return self.fn(ui, *args, **opts)
589 589 else:
590 590 try:
591 591 return util.checksignature(self.fn)(ui, *args, **opts)
592 592 except error.SignatureError:
593 593 args = ' '.join([self.cmdname] + self.args)
594 594 ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
595 595 raise
596 596
597 597 class lazyaliasentry(object):
598 598 """like a typical command entry (func, opts, help), but is lazy"""
599 599
600 600 def __init__(self, ui, name, definition, cmdtable, source):
601 601 self.ui = ui
602 602 self.name = name
603 603 self.definition = definition
604 604 self.cmdtable = cmdtable.copy()
605 605 self.source = source
606 606
607 607 @util.propertycache
608 608 def _aliasdef(self):
609 609 return cmdalias(self.ui, self.name, self.definition, self.cmdtable,
610 610 self.source)
611 611
612 612 def __getitem__(self, n):
613 613 aliasdef = self._aliasdef
614 614 if n == 0:
615 615 return aliasdef
616 616 elif n == 1:
617 617 return aliasdef.opts
618 618 elif n == 2:
619 619 return aliasdef.help
620 620 else:
621 621 raise IndexError
622 622
623 623 def __iter__(self):
624 624 for i in range(3):
625 625 yield self[i]
626 626
627 627 def __len__(self):
628 628 return 3
629 629
630 630 def addaliases(ui, cmdtable):
631 631 # aliases are processed after extensions have been loaded, so they
632 632 # may use extension commands. Aliases can also use other alias definitions,
633 633 # but only if they have been defined prior to the current definition.
634 634 for alias, definition in ui.configitems('alias', ignoresub=True):
635 635 try:
636 636 if cmdtable[alias].definition == definition:
637 637 continue
638 638 except (KeyError, AttributeError):
639 639 # definition might not exist or it might not be a cmdalias
640 640 pass
641 641
642 642 source = ui.configsource('alias', alias)
643 643 entry = lazyaliasentry(ui, alias, definition, cmdtable, source)
644 644 cmdtable[alias] = entry
645 645
646 646 def _parse(ui, args):
647 647 options = {}
648 648 cmdoptions = {}
649 649
650 650 try:
651 651 args = fancyopts.fancyopts(args, commands.globalopts, options)
652 652 except getopt.GetoptError as inst:
653 653 raise error.CommandError(None, stringutil.forcebytestr(inst))
654 654
655 655 if args:
656 656 cmd, args = args[0], args[1:]
657 657 aliases, entry = cmdutil.findcmd(cmd, commands.table,
658 658 ui.configbool("ui", "strict"))
659 659 cmd = aliases[0]
660 660 args = aliasargs(entry[0], args)
661 661 defaults = ui.config("defaults", cmd)
662 662 if defaults:
663 663 args = pycompat.maplist(
664 664 util.expandpath, pycompat.shlexsplit(defaults)) + args
665 665 c = list(entry[1])
666 666 else:
667 667 cmd = None
668 668 c = []
669 669
670 670 # combine global options into local
671 671 for o in commands.globalopts:
672 672 c.append((o[0], o[1], options[o[1]], o[3]))
673 673
674 674 try:
675 675 args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
676 676 except getopt.GetoptError as inst:
677 677 raise error.CommandError(cmd, stringutil.forcebytestr(inst))
678 678
679 679 # separate global options back out
680 680 for o in commands.globalopts:
681 681 n = o[1]
682 682 options[n] = cmdoptions[n]
683 683 del cmdoptions[n]
684 684
685 685 return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
686 686
687 687 def _parseconfig(ui, config):
688 688 """parse the --config options from the command line"""
689 689 configs = []
690 690
691 691 for cfg in config:
692 692 try:
693 693 name, value = [cfgelem.strip()
694 694 for cfgelem in cfg.split('=', 1)]
695 695 section, name = name.split('.', 1)
696 696 if not section or not name:
697 697 raise IndexError
698 698 ui.setconfig(section, name, value, '--config')
699 699 configs.append((section, name, value))
700 700 except (IndexError, ValueError):
701 701 raise error.Abort(_('malformed --config option: %r '
702 702 '(use --config section.name=value)')
703 703 % pycompat.bytestr(cfg))
704 704
705 705 return configs
706 706
707 707 def _earlyparseopts(ui, args):
708 708 options = {}
709 709 fancyopts.fancyopts(args, commands.globalopts, options,
710 710 gnu=not ui.plain('strictflags'), early=True,
711 711 optaliases={'repository': ['repo']})
712 712 return options
713 713
714 714 def _earlysplitopts(args):
715 715 """Split args into a list of possible early options and remainder args"""
716 716 shortoptions = 'R:'
717 717 # TODO: perhaps 'debugger' should be included
718 718 longoptions = ['cwd=', 'repository=', 'repo=', 'config=']
719 719 return fancyopts.earlygetopt(args, shortoptions, longoptions,
720 720 gnu=True, keepsep=True)
721 721
722 722 def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
723 723 # run pre-hook, and abort if it fails
724 724 hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs),
725 725 pats=cmdpats, opts=cmdoptions)
726 726 try:
727 727 ret = _runcommand(ui, options, cmd, d)
728 728 # run post-hook, passing command result
729 729 hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
730 730 result=ret, pats=cmdpats, opts=cmdoptions)
731 731 except Exception:
732 732 # run failure hook and re-raise
733 733 hook.hook(lui, repo, "fail-%s" % cmd, False, args=" ".join(fullargs),
734 734 pats=cmdpats, opts=cmdoptions)
735 735 raise
736 736 return ret
737 737
738 738 def _getlocal(ui, rpath, wd=None):
739 739 """Return (path, local ui object) for the given target path.
740 740
741 741 Takes paths in [cwd]/.hg/hgrc into account.
742 742 """
743 743 if wd is None:
744 744 try:
745 745 wd = pycompat.getcwd()
746 746 except OSError as e:
747 747 raise error.Abort(_("error getting current working directory: %s") %
748 748 encoding.strtolocal(e.strerror))
749 749 path = cmdutil.findrepo(wd) or ""
750 750 if not path:
751 751 lui = ui
752 752 else:
753 753 lui = ui.copy()
754 754 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
755 755
756 756 if rpath:
757 757 path = lui.expandpath(rpath)
758 758 lui = ui.copy()
759 759 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
760 760
761 761 return path, lui
762 762
763 763 def _checkshellalias(lui, ui, args):
764 764 """Return the function to run the shell alias, if it is required"""
765 765 options = {}
766 766
767 767 try:
768 768 args = fancyopts.fancyopts(args, commands.globalopts, options)
769 769 except getopt.GetoptError:
770 770 return
771 771
772 772 if not args:
773 773 return
774 774
775 775 cmdtable = commands.table
776 776
777 777 cmd = args[0]
778 778 try:
779 779 strict = ui.configbool("ui", "strict")
780 780 aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
781 781 except (error.AmbiguousCommand, error.UnknownCommand):
782 782 return
783 783
784 784 cmd = aliases[0]
785 785 fn = entry[0]
786 786
787 787 if cmd and util.safehasattr(fn, 'shell'):
788 788 # shell alias shouldn't receive early options which are consumed by hg
789 789 _earlyopts, args = _earlysplitopts(args)
790 790 d = lambda: fn(ui, *args[1:])
791 791 return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
792 792 [], {})
793 793
794 794 def _dispatch(req):
795 795 args = req.args
796 796 ui = req.ui
797 797
798 798 # check for cwd
799 799 cwd = req.earlyoptions['cwd']
800 800 if cwd:
801 801 os.chdir(cwd)
802 802
803 803 rpath = req.earlyoptions['repository']
804 804 path, lui = _getlocal(ui, rpath)
805 805
806 806 uis = {ui, lui}
807 807
808 808 if req.repo:
809 809 uis.add(req.repo.ui)
810 810
811 811 if req.earlyoptions['profile']:
812 812 for ui_ in uis:
813 813 ui_.setconfig('profiling', 'enabled', 'true', '--profile')
814 814
815 815 profile = lui.configbool('profiling', 'enabled')
816 816 with profiling.profile(lui, enabled=profile) as profiler:
817 817 # Configure extensions in phases: uisetup, extsetup, cmdtable, and
818 818 # reposetup
819 819 extensions.loadall(lui)
820 820 # Propagate any changes to lui.__class__ by extensions
821 821 ui.__class__ = lui.__class__
822 822
823 823 # (uisetup and extsetup are handled in extensions.loadall)
824 824
825 825 # (reposetup is handled in hg.repository)
826 826
827 827 addaliases(lui, commands.table)
828 828
829 829 # All aliases and commands are completely defined, now.
830 830 # Check abbreviation/ambiguity of shell alias.
831 831 shellaliasfn = _checkshellalias(lui, ui, args)
832 832 if shellaliasfn:
833 833 return shellaliasfn()
834 834
835 835 # check for fallback encoding
836 836 fallback = lui.config('ui', 'fallbackencoding')
837 837 if fallback:
838 838 encoding.fallbackencoding = fallback
839 839
840 840 fullargs = args
841 841 cmd, func, args, options, cmdoptions = _parse(lui, args)
842 842
843 843 if options["config"] != req.earlyoptions["config"]:
844 844 raise error.Abort(_("option --config may not be abbreviated!"))
845 845 if options["cwd"] != req.earlyoptions["cwd"]:
846 846 raise error.Abort(_("option --cwd may not be abbreviated!"))
847 847 if options["repository"] != req.earlyoptions["repository"]:
848 848 raise error.Abort(_(
849 849 "option -R has to be separated from other options (e.g. not "
850 850 "-qR) and --repository may only be abbreviated as --repo!"))
851 851 if options["debugger"] != req.earlyoptions["debugger"]:
852 852 raise error.Abort(_("option --debugger may not be abbreviated!"))
853 853 # don't validate --profile/--traceback, which can be enabled from now
854 854
855 855 if options["encoding"]:
856 856 encoding.encoding = options["encoding"]
857 857 if options["encodingmode"]:
858 858 encoding.encodingmode = options["encodingmode"]
859 859 if options["time"]:
860 860 def get_times():
861 861 t = os.times()
862 862 if t[4] == 0.0:
863 863 # Windows leaves this as zero, so use time.clock()
864 864 t = (t[0], t[1], t[2], t[3], time.clock())
865 865 return t
866 866 s = get_times()
867 867 def print_time():
868 868 t = get_times()
869 869 ui.warn(
870 870 _("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
871 871 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
872 872 ui.atexit(print_time)
873 873 if options["profile"]:
874 874 profiler.start()
875 875
876 876 if options['verbose'] or options['debug'] or options['quiet']:
877 877 for opt in ('verbose', 'debug', 'quiet'):
878 878 val = pycompat.bytestr(bool(options[opt]))
879 879 for ui_ in uis:
880 880 ui_.setconfig('ui', opt, val, '--' + opt)
881 881
882 882 if options['traceback']:
883 883 for ui_ in uis:
884 884 ui_.setconfig('ui', 'traceback', 'on', '--traceback')
885 885
886 886 if options['noninteractive']:
887 887 for ui_ in uis:
888 888 ui_.setconfig('ui', 'interactive', 'off', '-y')
889 889
890 890 if cmdoptions.get('insecure', False):
891 891 for ui_ in uis:
892 892 ui_.insecureconnections = True
893 893
894 894 # setup color handling before pager, because setting up pager
895 895 # might cause incorrect console information
896 896 coloropt = options['color']
897 897 for ui_ in uis:
898 898 if coloropt:
899 899 ui_.setconfig('ui', 'color', coloropt, '--color')
900 900 color.setup(ui_)
901 901
902 902 if stringutil.parsebool(options['pager']):
903 903 # ui.pager() expects 'internal-always-' prefix in this case
904 904 ui.pager('internal-always-' + cmd)
905 905 elif options['pager'] != 'auto':
906 906 for ui_ in uis:
907 907 ui_.disablepager()
908 908
909 909 if options['version']:
910 910 return commands.version_(ui)
911 911 if options['help']:
912 912 return commands.help_(ui, cmd, command=cmd is not None)
913 913 elif not cmd:
914 914 return commands.help_(ui, 'shortlist')
915 915
916 916 repo = None
917 917 cmdpats = args[:]
918 918 if not func.norepo:
919 919 # use the repo from the request only if we don't have -R
920 920 if not rpath and not cwd:
921 921 repo = req.repo
922 922
923 923 if repo:
924 924 # set the descriptors of the repo ui to those of ui
925 925 repo.ui.fin = ui.fin
926 926 repo.ui.fout = ui.fout
927 927 repo.ui.ferr = ui.ferr
928 928 else:
929 929 try:
930 930 repo = hg.repository(ui, path=path,
931 presetupfuncs=req.prereposetups)
931 presetupfuncs=req.prereposetups,
932 intents=func.intents)
932 933 if not repo.local():
933 934 raise error.Abort(_("repository '%s' is not local")
934 935 % path)
935 936 repo.ui.setconfig("bundle", "mainreporoot", repo.root,
936 937 'repo')
937 938 except error.RequirementError:
938 939 raise
939 940 except error.RepoError:
940 941 if rpath: # invalid -R path
941 942 raise
942 943 if not func.optionalrepo:
943 944 if func.inferrepo and args and not path:
944 945 # try to infer -R from command args
945 946 repos = pycompat.maplist(cmdutil.findrepo, args)
946 947 guess = repos[0]
947 948 if guess and repos.count(guess) == len(repos):
948 949 req.args = ['--repository', guess] + fullargs
949 950 req.earlyoptions['repository'] = guess
950 951 return _dispatch(req)
951 952 if not path:
952 953 raise error.RepoError(_("no repository found in"
953 954 " '%s' (.hg not found)")
954 955 % pycompat.getcwd())
955 956 raise
956 957 if repo:
957 958 ui = repo.ui
958 959 if options['hidden']:
959 960 repo = repo.unfiltered()
960 961 args.insert(0, repo)
961 962 elif rpath:
962 963 ui.warn(_("warning: --repository ignored\n"))
963 964
964 965 msg = _formatargs(fullargs)
965 966 ui.log("command", '%s\n', msg)
966 967 strcmdopt = pycompat.strkwargs(cmdoptions)
967 968 d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
968 969 try:
969 970 return runcommand(lui, repo, cmd, fullargs, ui, options, d,
970 971 cmdpats, cmdoptions)
971 972 finally:
972 973 if repo and repo != req.repo:
973 974 repo.close()
974 975
975 976 def _runcommand(ui, options, cmd, cmdfunc):
976 977 """Run a command function, possibly with profiling enabled."""
977 978 try:
978 979 return cmdfunc()
979 980 except error.SignatureError:
980 981 raise error.CommandError(cmd, _('invalid arguments'))
981 982
982 983 def _exceptionwarning(ui):
983 984 """Produce a warning message for the current active exception"""
984 985
985 986 # For compatibility checking, we discard the portion of the hg
986 987 # version after the + on the assumption that if a "normal
987 988 # user" is running a build with a + in it the packager
988 989 # probably built from fairly close to a tag and anyone with a
989 990 # 'make local' copy of hg (where the version number can be out
990 991 # of date) will be clueful enough to notice the implausible
991 992 # version number and try updating.
992 993 ct = util.versiontuple(n=2)
993 994 worst = None, ct, ''
994 995 if ui.config('ui', 'supportcontact') is None:
995 996 for name, mod in extensions.extensions():
996 997 # 'testedwith' should be bytes, but not all extensions are ported
997 998 # to py3 and we don't want a Unicode exception because of that.
998 999 testedwith = stringutil.forcebytestr(getattr(mod, 'testedwith', ''))
999 1000 report = getattr(mod, 'buglink', _('the extension author.'))
1000 1001 if not testedwith.strip():
1001 1002 # We found an untested extension. It's likely the culprit.
1002 1003 worst = name, 'unknown', report
1003 1004 break
1004 1005
1005 1006 # Never blame on extensions bundled with Mercurial.
1006 1007 if extensions.ismoduleinternal(mod):
1007 1008 continue
1008 1009
1009 1010 tested = [util.versiontuple(t, 2) for t in testedwith.split()]
1010 1011 if ct in tested:
1011 1012 continue
1012 1013
1013 1014 lower = [t for t in tested if t < ct]
1014 1015 nearest = max(lower or tested)
1015 1016 if worst[0] is None or nearest < worst[1]:
1016 1017 worst = name, nearest, report
1017 1018 if worst[0] is not None:
1018 1019 name, testedwith, report = worst
1019 1020 if not isinstance(testedwith, (bytes, str)):
1020 1021 testedwith = '.'.join([stringutil.forcebytestr(c)
1021 1022 for c in testedwith])
1022 1023 warning = (_('** Unknown exception encountered with '
1023 1024 'possibly-broken third-party extension %s\n'
1024 1025 '** which supports versions %s of Mercurial.\n'
1025 1026 '** Please disable %s and try your action again.\n'
1026 1027 '** If that fixes the bug please report it to %s\n')
1027 1028 % (name, testedwith, name, report))
1028 1029 else:
1029 1030 bugtracker = ui.config('ui', 'supportcontact')
1030 1031 if bugtracker is None:
1031 1032 bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
1032 1033 warning = (_("** unknown exception encountered, "
1033 1034 "please report by visiting\n** ") + bugtracker + '\n')
1034 1035 sysversion = pycompat.sysbytes(sys.version).replace('\n', '')
1035 1036 warning += ((_("** Python %s\n") % sysversion) +
1036 1037 (_("** Mercurial Distributed SCM (version %s)\n") %
1037 1038 util.version()) +
1038 1039 (_("** Extensions loaded: %s\n") %
1039 1040 ", ".join([x[0] for x in extensions.extensions()])))
1040 1041 return warning
1041 1042
1042 1043 def handlecommandexception(ui):
1043 1044 """Produce a warning message for broken commands
1044 1045
1045 1046 Called when handling an exception; the exception is reraised if
1046 1047 this function returns False, ignored otherwise.
1047 1048 """
1048 1049 warning = _exceptionwarning(ui)
1049 1050 ui.log("commandexception", "%s\n%s\n", warning,
1050 1051 pycompat.sysbytes(traceback.format_exc()))
1051 1052 ui.warn(warning)
1052 1053 return False # re-raise the exception
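
The dispatch change above is the consuming side of this API: _dispatch() now
reads func.intents off the resolved command and forwards it into
hg.repository(). A minimal sketch of the declaring side, assuming a
hypothetical intents keyword on the command decorator and an illustrative
b'readonly' intent value (neither is part of this diff):

    # Hypothetical sketch: a command declaring an intent. _dispatch() would
    # then pass func.intents through to hg.repository(..., intents=...).
    from mercurial import registrar

    cmdtable = {}
    command = registrar.command(cmdtable)

    @command(b'mysummary', [], b'hg mysummary', intents={b'readonly'})
    def mysummary(ui, repo):
        # a repo opened with a readonly intent could skip write-side setup
        ui.write(b'%d changesets\n' % len(repo))
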
@@ -1,1168 +1,1170 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 node,
39 39 phases,
40 40 scmutil,
41 41 sshpeer,
42 42 statichttprepo,
43 43 ui as uimod,
44 44 unionrepo,
45 45 url,
46 46 util,
47 47 verify as verifymod,
48 48 vfs as vfsmod,
49 49 )
50 50
51 51 from .utils import (
52 52 stringutil,
53 53 )
54 54
55 55 release = lock.release
56 56
57 57 # shared features
58 58 sharedbookmarks = 'bookmarks'
59 59
60 60 def _local(path):
61 61 path = util.expandpath(util.urllocalpath(path))
62 62 return (os.path.isfile(path) and bundlerepo or localrepo)
63 63
64 64 def addbranchrevs(lrepo, other, branches, revs):
65 65 peer = other.peer() # a courtesy to callers using a localrepo for other
66 66 hashbranch, branches = branches
67 67 if not hashbranch and not branches:
68 68 x = revs or None
69 69 if revs:
70 70 y = revs[0]
71 71 else:
72 72 y = None
73 73 return x, y
74 74 if revs:
75 75 revs = list(revs)
76 76 else:
77 77 revs = []
78 78
79 79 if not peer.capable('branchmap'):
80 80 if branches:
81 81 raise error.Abort(_("remote branch lookup not supported"))
82 82 revs.append(hashbranch)
83 83 return revs, revs[0]
84 84
85 85 with peer.commandexecutor() as e:
86 86 branchmap = e.callcommand('branchmap', {}).result()
87 87
88 88 def primary(branch):
89 89 if branch == '.':
90 90 if not lrepo:
91 91 raise error.Abort(_("dirstate branch not accessible"))
92 92 branch = lrepo.dirstate.branch()
93 93 if branch in branchmap:
94 94 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
95 95 return True
96 96 else:
97 97 return False
98 98
99 99 for branch in branches:
100 100 if not primary(branch):
101 101 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
102 102 if hashbranch:
103 103 if not primary(hashbranch):
104 104 revs.append(hashbranch)
105 105 return revs, revs[0]
106 106
107 107 def parseurl(path, branches=None):
108 108 '''parse url#branch, returning (url, (branch, branches))'''
109 109
110 110 u = util.url(path)
111 111 branch = None
112 112 if u.fragment:
113 113 branch = u.fragment
114 114 u.fragment = None
115 115 return bytes(u), (branch, branches or [])
116 116
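
A doctest-style illustration (not in the source) of the fragment handling
above:

    >>> parseurl(b'https://example.org/repo#stable')
    ('https://example.org/repo', ('stable', []))
    >>> parseurl(b'https://example.org/repo', [b'default'])
    ('https://example.org/repo', (None, ['default']))
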
117 117 schemes = {
118 118 'bundle': bundlerepo,
119 119 'union': unionrepo,
120 120 'file': _local,
121 121 'http': httppeer,
122 122 'https': httppeer,
123 123 'ssh': sshpeer,
124 124 'static-http': statichttprepo,
125 125 }
126 126
127 127 def _peerlookup(path):
128 128 u = util.url(path)
129 129 scheme = u.scheme or 'file'
130 130 thing = schemes.get(scheme) or schemes['file']
131 131 try:
132 132 return thing(path)
133 133 except TypeError:
134 134 # we can't test callable(thing) because 'thing' can be an unloaded
135 135 # module that implements __call__
136 136 if not util.safehasattr(thing, 'instance'):
137 137 raise
138 138 return thing
139 139
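
Illustrative resolutions against the schemes table above (modules, not
instances, are returned; the paths are made up):

    _peerlookup(b'ssh://host/repo')   # -> sshpeer module
    _peerlookup(b'/some/local/path')  # -> localrepo or bundlerepo, via _local
    _peerlookup(b'relative/path')     # -> same: no scheme falls back to 'file'
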
140 140 def islocal(repo):
141 141 '''return true if repo (or path pointing to repo) is local'''
142 142 if isinstance(repo, bytes):
143 143 try:
144 144 return _peerlookup(repo).islocal(repo)
145 145 except AttributeError:
146 146 return False
147 147 return repo.local()
148 148
149 149 def openpath(ui, path):
150 150 '''open path with open if local, url.open if remote'''
151 151 pathurl = util.url(path, parsequery=False, parsefragment=False)
152 152 if pathurl.islocal():
153 153 return util.posixfile(pathurl.localpath(), 'rb')
154 154 else:
155 155 return url.open(ui, path)
156 156
157 157 # a list of (ui, repo) functions called for wire peer initialization
158 158 wirepeersetupfuncs = []
159 159
160 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
160 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
161 intents=None):
161 162 """return a repository object for the specified path"""
162 obj = _peerlookup(path).instance(ui, path, create)
163 obj = _peerlookup(path).instance(ui, path, create, intents=intents)
163 164 ui = getattr(obj, "ui", ui)
164 165 for f in presetupfuncs or []:
165 166 f(ui, obj)
166 167 for name, module in extensions.extensions(ui):
167 168 hook = getattr(module, 'reposetup', None)
168 169 if hook:
169 170 hook(ui, obj)
170 171 if not obj.local():
171 172 for f in wirepeersetupfuncs:
172 173 f(ui, obj)
173 174 return obj
174 175
175 def repository(ui, path='', create=False, presetupfuncs=None):
176 def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
176 177 """return a repository object for the specified path"""
177 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
178 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
179 intents=intents)
178 180 repo = peer.local()
179 181 if not repo:
180 182 raise error.Abort(_("repository '%s' is not local") %
181 183 (path or peer.url()))
182 184 return repo.filtered('visible')
183 185
184 def peer(uiorrepo, opts, path, create=False):
186 def peer(uiorrepo, opts, path, create=False, intents=None):
185 187 '''return a repository peer for the specified path'''
186 188 rui = remoteui(uiorrepo, opts)
187 return _peerorrepo(rui, path, create).peer()
189 return _peerorrepo(rui, path, create, intents=intents).peer()
188 190
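
With the plumbing above, callers may pass an optional intents hint when
opening a repo or peer; None preserves the previous behavior. A hedged usage
sketch (the b'readonly' value is illustrative; this changeset only threads
the argument through):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo', intents={b'readonly'})
    remote = hg.peer(ui, {}, b'https://example.org/repo',
                     intents={b'readonly'})
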
189 191 def defaultdest(source):
190 192 '''return default destination of clone if none is given
191 193
192 194 >>> defaultdest(b'foo')
193 195 'foo'
194 196 >>> defaultdest(b'/foo/bar')
195 197 'bar'
196 198 >>> defaultdest(b'/')
197 199 ''
198 200 >>> defaultdest(b'')
199 201 ''
200 202 >>> defaultdest(b'http://example.org/')
201 203 ''
202 204 >>> defaultdest(b'http://example.org/foo/')
203 205 'foo'
204 206 '''
205 207 path = util.url(source).path
206 208 if not path:
207 209 return ''
208 210 return os.path.basename(os.path.normpath(path))
209 211
210 212 def sharedreposource(repo):
211 213 """Returns repository object for source repository of a shared repo.
212 214
213 215 If repo is not a shared repository, returns None.
214 216 """
215 217 if repo.sharedpath == repo.path:
216 218 return None
217 219
218 220 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
219 221 return repo.srcrepo
220 222
221 223 # the sharedpath always ends in the .hg; we want the path to the repo
222 224 source = repo.vfs.split(repo.sharedpath)[0]
223 225 srcurl, branches = parseurl(source)
224 226 srcrepo = repository(repo.ui, srcurl)
225 227 repo.srcrepo = srcrepo
226 228 return srcrepo
227 229
228 230 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
229 231 relative=False):
230 232 '''create a shared repository'''
231 233
232 234 if not islocal(source):
233 235 raise error.Abort(_('can only share local repositories'))
234 236
235 237 if not dest:
236 238 dest = defaultdest(source)
237 239 else:
238 240 dest = ui.expandpath(dest)
239 241
240 242 if isinstance(source, bytes):
241 243 origsource = ui.expandpath(source)
242 244 source, branches = parseurl(origsource)
243 245 srcrepo = repository(ui, source)
244 246 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
245 247 else:
246 248 srcrepo = source.local()
247 249 origsource = source = srcrepo.url()
248 250 checkout = None
249 251
250 252 sharedpath = srcrepo.sharedpath # if our source is already sharing
251 253
252 254 destwvfs = vfsmod.vfs(dest, realpath=True)
253 255 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
254 256
255 257 if destvfs.lexists():
256 258 raise error.Abort(_('destination already exists'))
257 259
258 260 if not destwvfs.isdir():
259 261 destwvfs.mkdir()
260 262 destvfs.makedir()
261 263
262 264 requirements = ''
263 265 try:
264 266 requirements = srcrepo.vfs.read('requires')
265 267 except IOError as inst:
266 268 if inst.errno != errno.ENOENT:
267 269 raise
268 270
269 271 if relative:
270 272 try:
271 273 sharedpath = os.path.relpath(sharedpath, destvfs.base)
272 274 requirements += 'relshared\n'
273 275 except (IOError, ValueError) as e:
274 276 # ValueError is raised on Windows if the drive letters differ on
275 277 # each path
276 278 raise error.Abort(_('cannot calculate relative path'),
277 279 hint=stringutil.forcebytestr(e))
278 280 else:
279 281 requirements += 'shared\n'
280 282
281 283 destvfs.write('requires', requirements)
282 284 destvfs.write('sharedpath', sharedpath)
283 285
284 286 r = repository(ui, destwvfs.base)
285 287 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
286 288 _postshareupdate(r, update, checkout=checkout)
287 289 return r
288 290
289 291 def unshare(ui, repo):
290 292 """convert a shared repository to a normal one
291 293
292 294 Copy the store data to the repo and remove the sharedpath data.
293 295 """
294 296
295 297 destlock = lock = None
296 298 lock = repo.lock()
297 299 try:
298 300 # we use locks here because if we race with commit, we
299 301 # can end up with extra data in the cloned revlogs that's
300 302 # not pointed to by changesets, thus causing verify to
301 303 # fail
302 304
303 305 destlock = copystore(ui, repo, repo.path)
304 306
305 307 sharefile = repo.vfs.join('sharedpath')
306 308 util.rename(sharefile, sharefile + '.old')
307 309
308 310 repo.requirements.discard('shared')
309 311 repo.requirements.discard('relshared')
310 312 repo._writerequirements()
311 313 finally:
312 314 destlock and destlock.release()
313 315 lock and lock.release()
314 316
315 317 # update store, spath, svfs and sjoin of repo
316 318 repo.unfiltered().__init__(repo.baseui, repo.root)
317 319
318 320 # TODO: figure out how to access subrepos that exist, but were previously
319 321 # removed from .hgsub
320 322 c = repo['.']
321 323 subs = c.substate
322 324 for s in sorted(subs):
323 325 c.sub(s).unshare()
324 326
325 327 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
326 328 """Called after a new shared repo is created.
327 329
328 330 The new repo only has a requirements file and a pointer to the source.
329 331 This function configures additional shared data.
330 332
331 333 Extensions can wrap this function and write additional entries to
332 334 destrepo/.hg/shared to indicate additional pieces of data to be shared.
333 335 """
334 336 default = defaultpath or sourcerepo.ui.config('paths', 'default')
335 337 if default:
336 338 template = ('[paths]\n'
337 339 'default = %s\n')
338 340 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
339 341
340 342 with destrepo.wlock():
341 343 if bookmarks:
342 344 destrepo.vfs.write('shared', sharedbookmarks + '\n')
343 345
344 346 def _postshareupdate(repo, update, checkout=None):
345 347 """Maybe perform a working directory update after a shared repo is created.
346 348
347 349 ``update`` can be a boolean or a revision to update to.
348 350 """
349 351 if not update:
350 352 return
351 353
352 354 repo.ui.status(_("updating working directory\n"))
353 355 if update is not True:
354 356 checkout = update
355 357 for test in (checkout, 'default', 'tip'):
356 358 if test is None:
357 359 continue
358 360 try:
359 361 uprev = repo.lookup(test)
360 362 break
361 363 except error.RepoLookupError:
362 364 continue
363 365 _update(repo, uprev)
364 366
365 367 def copystore(ui, srcrepo, destpath):
366 368 '''copy files from the store of srcrepo into destpath
367 369
368 370 returns destlock
369 371 '''
370 372 destlock = None
371 373 try:
372 374 hardlink = None
373 375 num = 0
374 376 closetopic = [None]
375 377 def prog(topic, pos):
376 378 if pos is None:
377 379 closetopic[0] = topic
378 380 else:
379 381 ui.progress(topic, pos + num)
380 382 srcpublishing = srcrepo.publishing()
381 383 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
382 384 dstvfs = vfsmod.vfs(destpath)
383 385 for f in srcrepo.store.copylist():
384 386 if srcpublishing and f.endswith('phaseroots'):
385 387 continue
386 388 dstbase = os.path.dirname(f)
387 389 if dstbase and not dstvfs.exists(dstbase):
388 390 dstvfs.mkdir(dstbase)
389 391 if srcvfs.exists(f):
390 392 if f.endswith('data'):
391 393 # 'dstbase' may be empty (e.g. revlog format 0)
392 394 lockfile = os.path.join(dstbase, "lock")
393 395 # lock to avoid premature writing to the target
394 396 destlock = lock.lock(dstvfs, lockfile)
395 397 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
396 398 hardlink, progress=prog)
397 399 num += n
398 400 if hardlink:
399 401 ui.debug("linked %d files\n" % num)
400 402 if closetopic[0]:
401 403 ui.progress(closetopic[0], None)
402 404 else:
403 405 ui.debug("copied %d files\n" % num)
404 406 if closetopic[0]:
405 407 ui.progress(closetopic[0], None)
406 408 return destlock
407 409 except: # re-raises
408 410 release(destlock)
409 411 raise
410 412
411 413 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
412 414 rev=None, update=True, stream=False):
413 415 """Perform a clone using a shared repo.
414 416
415 417 The store for the repository will be located at <sharepath>/.hg. The
416 418 specified revisions will be cloned or pulled from "source". A shared repo
417 419 will be created at "dest" and a working copy will be created if "update" is
418 420 True.
419 421 """
420 422 revs = None
421 423 if rev:
422 424 if not srcpeer.capable('lookup'):
423 425 raise error.Abort(_("src repository does not support "
424 426 "revision lookup and so doesn't "
425 427 "support clone by revision"))
426 428
427 429 # TODO this is batchable.
428 430 remoterevs = []
429 431 for r in rev:
430 432 with srcpeer.commandexecutor() as e:
431 433 remoterevs.append(e.callcommand('lookup', {
432 434 'key': r,
433 435 }).result())
434 436 revs = remoterevs
435 437
436 438 # Obtain a lock before checking for or cloning the pooled repo otherwise
437 439 # 2 clients may race creating or populating it.
438 440 pooldir = os.path.dirname(sharepath)
439 441 # lock class requires the directory to exist.
440 442 try:
441 443 util.makedir(pooldir, False)
442 444 except OSError as e:
443 445 if e.errno != errno.EEXIST:
444 446 raise
445 447
446 448 poolvfs = vfsmod.vfs(pooldir)
447 449 basename = os.path.basename(sharepath)
448 450
449 451 with lock.lock(poolvfs, '%s.lock' % basename):
450 452 if os.path.exists(sharepath):
451 453 ui.status(_('(sharing from existing pooled repository %s)\n') %
452 454 basename)
453 455 else:
454 456 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
455 457 # Always use pull mode because hardlinks in share mode don't work
456 458 # well. Never update because working copies aren't necessary in
457 459 # share mode.
458 460 clone(ui, peeropts, source, dest=sharepath, pull=True,
459 461 revs=rev, update=False, stream=stream)
460 462
461 463 # Resolve the value to put in [paths] section for the source.
462 464 if islocal(source):
463 465 defaultpath = os.path.abspath(util.urllocalpath(source))
464 466 else:
465 467 defaultpath = source
466 468
467 469 sharerepo = repository(ui, path=sharepath)
468 470 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
469 471 defaultpath=defaultpath)
470 472
471 473 # We need to perform a pull against the dest repo to fetch bookmarks
472 474 # and other non-store data that isn't shared by default. In the case of
473 475 # a non-existing shared repo, this means we pull from the remote twice. This
474 476 # is a bit weird. But at the time it was implemented, there wasn't an easy
475 477 # way to pull just non-changegroup data.
476 478 destrepo = repository(ui, path=dest)
477 479 exchange.pull(destrepo, srcpeer, heads=revs)
478 480
479 481 _postshareupdate(destrepo, update)
480 482
481 483 return srcpeer, peer(ui, peeropts, dest)
482 484
483 485 # Recomputing branch cache might be slow on big repos,
484 486 # so just copy it
485 487 def _copycache(srcrepo, dstcachedir, fname):
486 488 """copy a cache from srcrepo to destcachedir (if it exists)"""
487 489 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
488 490 dstbranchcache = os.path.join(dstcachedir, fname)
489 491 if os.path.exists(srcbranchcache):
490 492 if not os.path.exists(dstcachedir):
491 493 os.mkdir(dstcachedir)
492 494 util.copyfile(srcbranchcache, dstbranchcache)
493 495
494 496 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
495 497 update=True, stream=False, branch=None, shareopts=None):
496 498 """Make a copy of an existing repository.
497 499
498 500 Create a copy of an existing repository in a new directory. The
499 501 source and destination are URLs, as passed to the repository
500 502 function. Returns a pair of repository peers, the source and
501 503 newly created destination.
502 504
503 505 The location of the source is added to the new repository's
504 506 .hg/hgrc file, as the default to be used for future pulls and
505 507 pushes.
506 508
507 509 If an exception is raised, the partly cloned/updated destination
508 510 repository will be deleted.
509 511
510 512 Arguments:
511 513
512 514 source: repository object or URL
513 515
514 516 dest: URL of destination repository to create (defaults to base
515 517 name of source repository)
516 518
517 519 pull: always pull from source repository, even in local case or if the
518 520 server prefers streaming
519 521
520 522 stream: stream raw data uncompressed from repository (fast over
521 523 LAN, slow over WAN)
522 524
523 525 revs: revision to clone up to (implies pull=True)
524 526
525 527 update: update working directory after clone completes, if
526 528 destination is local repository (True means update to default rev,
527 529 anything else is treated as a revision)
528 530
529 531 branch: branches to clone
530 532
531 533 shareopts: dict of options to control auto sharing behavior. The "pool" key
532 534 activates auto sharing mode and defines the directory for stores. The
533 535 "mode" key determines how to construct the directory name of the shared
534 536 repository. "identity" means the name is derived from the node of the first
535 537 changeset in the repository. "remote" means the name is derived from the
536 538 remote's path/URL. Defaults to "identity."
537 539 """
538 540
539 541 if isinstance(source, bytes):
540 542 origsource = ui.expandpath(source)
541 543 source, branches = parseurl(origsource, branch)
542 544 srcpeer = peer(ui, peeropts, source)
543 545 else:
544 546 srcpeer = source.peer() # in case we were called with a localrepo
545 547 branches = (None, branch or [])
546 548 origsource = source = srcpeer.url()
547 549 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
548 550
549 551 if dest is None:
550 552 dest = defaultdest(source)
551 553 if dest:
552 554 ui.status(_("destination directory: %s\n") % dest)
553 555 else:
554 556 dest = ui.expandpath(dest)
555 557
556 558 dest = util.urllocalpath(dest)
557 559 source = util.urllocalpath(source)
558 560
559 561 if not dest:
560 562 raise error.Abort(_("empty destination path is not valid"))
561 563
562 564 destvfs = vfsmod.vfs(dest, expandpath=True)
563 565 if destvfs.lexists():
564 566 if not destvfs.isdir():
565 567 raise error.Abort(_("destination '%s' already exists") % dest)
566 568 elif destvfs.listdir():
567 569 raise error.Abort(_("destination '%s' is not empty") % dest)
568 570
569 571 shareopts = shareopts or {}
570 572 sharepool = shareopts.get('pool')
571 573 sharenamemode = shareopts.get('mode')
572 574 if sharepool and islocal(dest):
573 575 sharepath = None
574 576 if sharenamemode == 'identity':
575 577 # Resolve the name from the initial changeset in the remote
576 578 # repository. This returns nullid when the remote is empty. It
577 579 # raises RepoLookupError if revision 0 is filtered or otherwise
578 580 # not available. If we fail to resolve, sharing is not enabled.
579 581 try:
580 582 with srcpeer.commandexecutor() as e:
581 583 rootnode = e.callcommand('lookup', {
582 584 'key': '0',
583 585 }).result()
584 586
585 587 if rootnode != node.nullid:
586 588 sharepath = os.path.join(sharepool, node.hex(rootnode))
587 589 else:
588 590 ui.status(_('(not using pooled storage: '
589 591 'remote appears to be empty)\n'))
590 592 except error.RepoLookupError:
591 593 ui.status(_('(not using pooled storage: '
592 594 'unable to resolve identity of remote)\n'))
593 595 elif sharenamemode == 'remote':
594 596 sharepath = os.path.join(
595 597 sharepool, node.hex(hashlib.sha1(source).digest()))
596 598 else:
597 599 raise error.Abort(_('unknown share naming mode: %s') %
598 600 sharenamemode)
599 601
600 602 if sharepath:
601 603 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
602 604 dest, pull=pull, rev=revs, update=update,
603 605 stream=stream)
604 606
605 607 srclock = destlock = cleandir = None
606 608 srcrepo = srcpeer.local()
607 609 try:
608 610 abspath = origsource
609 611 if islocal(origsource):
610 612 abspath = os.path.abspath(util.urllocalpath(origsource))
611 613
612 614 if islocal(dest):
613 615 cleandir = dest
614 616
615 617 copy = False
616 618 if (srcrepo and srcrepo.cancopy() and islocal(dest)
617 619 and not phases.hassecret(srcrepo)):
618 620 copy = not pull and not revs
619 621
620 622 if copy:
621 623 try:
622 624 # we use a lock here because if we race with commit, we
623 625 # can end up with extra data in the cloned revlogs that's
624 626 # not pointed to by changesets, thus causing verify to
625 627 # fail
626 628 srclock = srcrepo.lock(wait=False)
627 629 except error.LockError:
628 630 copy = False
629 631
630 632 if copy:
631 633 srcrepo.hook('preoutgoing', throw=True, source='clone')
632 634 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
633 635 if not os.path.exists(dest):
634 636 os.mkdir(dest)
635 637 else:
636 638 # only clean up directories we create ourselves
637 639 cleandir = hgdir
638 640 try:
639 641 destpath = hgdir
640 642 util.makedir(destpath, notindexed=True)
641 643 except OSError as inst:
642 644 if inst.errno == errno.EEXIST:
643 645 cleandir = None
644 646 raise error.Abort(_("destination '%s' already exists")
645 647 % dest)
646 648 raise
647 649
648 650 destlock = copystore(ui, srcrepo, destpath)
649 651 # copy bookmarks over
650 652 srcbookmarks = srcrepo.vfs.join('bookmarks')
651 653 dstbookmarks = os.path.join(destpath, 'bookmarks')
652 654 if os.path.exists(srcbookmarks):
653 655 util.copyfile(srcbookmarks, dstbookmarks)
654 656
655 657 dstcachedir = os.path.join(destpath, 'cache')
656 658 for cache in cacheutil.cachetocopy(srcrepo):
657 659 _copycache(srcrepo, dstcachedir, cache)
658 660
659 661 # we need to re-init the repo after manually copying the data
660 662 # into it
661 663 destpeer = peer(srcrepo, peeropts, dest)
662 664 srcrepo.hook('outgoing', source='clone',
663 665 node=node.hex(node.nullid))
664 666 else:
665 667 try:
666 668 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
667 669 # only pass ui when no srcrepo
668 670 except OSError as inst:
669 671 if inst.errno == errno.EEXIST:
670 672 cleandir = None
671 673 raise error.Abort(_("destination '%s' already exists")
672 674 % dest)
673 675 raise
674 676
675 677 if revs:
676 678 if not srcpeer.capable('lookup'):
677 679 raise error.Abort(_("src repository does not support "
678 680 "revision lookup and so doesn't "
679 681 "support clone by revision"))
680 682
681 683 # TODO this is batchable.
682 684 remoterevs = []
683 685 for rev in revs:
684 686 with srcpeer.commandexecutor() as e:
685 687 remoterevs.append(e.callcommand('lookup', {
686 688 'key': rev,
687 689 }).result())
688 690 revs = remoterevs
689 691
690 692 checkout = revs[0]
691 693 else:
692 694 revs = None
693 695 local = destpeer.local()
694 696 if local:
695 697 u = util.url(abspath)
696 698 defaulturl = bytes(u)
697 699 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
698 700 if not stream:
699 701 if pull:
700 702 stream = False
701 703 else:
702 704 stream = None
703 705 # internal config: ui.quietbookmarkmove
704 706 overrides = {('ui', 'quietbookmarkmove'): True}
705 707 with local.ui.configoverride(overrides, 'clone'):
706 708 exchange.pull(local, srcpeer, revs,
707 709 streamclonerequested=stream)
708 710 elif srcrepo:
709 711 exchange.push(srcrepo, destpeer, revs=revs,
710 712 bookmarks=srcrepo._bookmarks.keys())
711 713 else:
712 714 raise error.Abort(_("clone from remote to remote not supported")
713 715 )
714 716
715 717 cleandir = None
716 718
717 719 destrepo = destpeer.local()
718 720 if destrepo:
719 721 template = uimod.samplehgrcs['cloned']
720 722 u = util.url(abspath)
721 723 u.passwd = None
722 724 defaulturl = bytes(u)
723 725 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
724 726 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
725 727
726 728 if ui.configbool('experimental', 'remotenames'):
727 729 logexchange.pullremotenames(destrepo, srcpeer)
728 730
729 731 if update:
730 732 if update is not True:
731 733 with srcpeer.commandexecutor() as e:
732 734 checkout = e.callcommand('lookup', {
733 735 'key': update,
734 736 }).result()
735 737
736 738 uprev = None
737 739 status = None
738 740 if checkout is not None:
739 741 if checkout in destrepo:
740 742 uprev = checkout
741 743 else:
742 744 if update is not True:
743 745 try:
744 746 uprev = destrepo.lookup(update)
745 747 except error.RepoLookupError:
746 748 pass
747 749 if uprev is None:
748 750 try:
749 751 uprev = destrepo._bookmarks['@']
750 752 update = '@'
751 753 bn = destrepo[uprev].branch()
752 754 if bn == 'default':
753 755 status = _("updating to bookmark @\n")
754 756 else:
755 757 status = (_("updating to bookmark @ on branch %s\n")
756 758 % bn)
757 759 except KeyError:
758 760 try:
759 761 uprev = destrepo.branchtip('default')
760 762 except error.RepoLookupError:
761 763 uprev = destrepo.lookup('tip')
762 764 if not status:
763 765 bn = destrepo[uprev].branch()
764 766 status = _("updating to branch %s\n") % bn
765 767 destrepo.ui.status(status)
766 768 _update(destrepo, uprev)
767 769 if update in destrepo._bookmarks:
768 770 bookmarks.activate(destrepo, update)
769 771 finally:
770 772 release(srclock, destlock)
771 773 if cleandir is not None:
772 774 shutil.rmtree(cleandir, True)
773 775 if srcpeer is not None:
774 776 srcpeer.close()
775 777 return srcpeer, destpeer
776 778
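
A usage sketch for the pooled-storage behavior described in the clone()
docstring (URL and paths are illustrative):

    # Clone while sharing the store from a common pool, keyed by the
    # remote's first changeset ("identity" mode).
    srcpeer, destpeer = clone(ui, {}, b'https://example.org/repo',
                              dest=b'repo',
                              shareopts={'pool': b'/var/cache/hgpool',
                                         'mode': 'identity'})
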
777 779 def _showstats(repo, stats, quietempty=False):
778 780 if quietempty and stats.isempty():
779 781 return
780 782 repo.ui.status(_("%d files updated, %d files merged, "
781 783 "%d files removed, %d files unresolved\n") % (
782 784 stats.updatedcount, stats.mergedcount,
783 785 stats.removedcount, stats.unresolvedcount))
784 786
785 787 def updaterepo(repo, node, overwrite, updatecheck=None):
786 788 """Update the working directory to node.
787 789
788 790 When overwrite is set, changes are clobbered; otherwise they are merged.
789 791
790 792 returns stats (see pydoc mercurial.merge.applyupdates)"""
791 793 return mergemod.update(repo, node, False, overwrite,
792 794 labels=['working copy', 'destination'],
793 795 updatecheck=updatecheck)
794 796
795 797 def update(repo, node, quietempty=False, updatecheck=None):
796 798 """update the working directory to node"""
797 799 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
798 800 _showstats(repo, stats, quietempty)
799 801 if stats.unresolvedcount:
800 802 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
801 803 return stats.unresolvedcount > 0
802 804
803 805 # naming conflict in clone()
804 806 _update = update
805 807
806 808 def clean(repo, node, show_stats=True, quietempty=False):
807 809 """forcibly switch the working directory to node, clobbering changes"""
808 810 stats = updaterepo(repo, node, True)
809 811 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
810 812 if show_stats:
811 813 _showstats(repo, stats, quietempty)
812 814 return stats.unresolvedcount > 0
813 815
814 816 # naming conflict in updatetotally()
815 817 _clean = clean
816 818
817 819 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
818 820 """Update the working directory with extra care for non-file components
819 821
820 822 This takes care of non-file components below:
821 823
822 824 :bookmark: might be advanced or (in)activated
823 825
824 826 This takes arguments below:
825 827
826 828 :checkout: to which revision the working directory is updated
827 829 :brev: a name, which might be a bookmark to be activated after updating
828 830 :clean: whether changes in the working directory can be discarded
829 831 :updatecheck: how to deal with a dirty working directory
830 832
831 833 Valid values for updatecheck are (None => linear):
832 834
833 835 * abort: abort if the working directory is dirty
834 836 * none: don't check (merge working directory changes into destination)
835 837 * linear: check that update is linear before merging working directory
836 838 changes into destination
837 839 * noconflict: check that the update does not result in file merges
838 840
839 841 This returns whether conflict is detected at updating or not.
840 842 """
841 843 if updatecheck is None:
842 844 updatecheck = ui.config('commands', 'update.check')
843 845 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
844 846 # If not configured, or invalid value configured
845 847 updatecheck = 'linear'
846 848 with repo.wlock():
847 849 movemarkfrom = None
848 850 warndest = False
849 851 if checkout is None:
850 852 updata = destutil.destupdate(repo, clean=clean)
851 853 checkout, movemarkfrom, brev = updata
852 854 warndest = True
853 855
854 856 if clean:
855 857 ret = _clean(repo, checkout)
856 858 else:
857 859 if updatecheck == 'abort':
858 860 cmdutil.bailifchanged(repo, merge=False)
859 861 updatecheck = 'none'
860 862 ret = _update(repo, checkout, updatecheck=updatecheck)
861 863
862 864 if not ret and movemarkfrom:
863 865 if movemarkfrom == repo['.'].node():
864 866 pass # no-op update
865 867 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
866 868 b = ui.label(repo._activebookmark, 'bookmarks.active')
867 869 ui.status(_("updating bookmark %s\n") % b)
868 870 else:
869 871 # this can happen with a non-linear update
870 872 b = ui.label(repo._activebookmark, 'bookmarks')
871 873 ui.status(_("(leaving bookmark %s)\n") % b)
872 874 bookmarks.deactivate(repo)
873 875 elif brev in repo._bookmarks:
874 876 if brev != repo._activebookmark:
875 877 b = ui.label(brev, 'bookmarks.active')
876 878 ui.status(_("(activating bookmark %s)\n") % b)
877 879 bookmarks.activate(repo, brev)
878 880 elif brev:
879 881 if repo._activebookmark:
880 882 b = ui.label(repo._activebookmark, 'bookmarks')
881 883 ui.status(_("(leaving bookmark %s)\n") % b)
882 884 bookmarks.deactivate(repo)
883 885
884 886 if warndest:
885 887 destutil.statusotherdests(ui, repo)
886 888
887 889 return ret
888 890
889 891 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
890 892 abort=False):
891 893 """Branch merge with node, resolving changes. Return true if any
892 894 unresolved conflicts."""
893 895 if not abort:
894 896 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
895 897 labels=labels)
896 898 else:
897 899 ms = mergemod.mergestate.read(repo)
898 900 if ms.active():
899 901 # there were conflicts
900 902 node = ms.localctx.hex()
901 903 else:
902 904 # there were no conflicts, mergestate was not stored
903 905 node = repo['.'].hex()
904 906
905 907 repo.ui.status(_("aborting the merge, updating back to"
906 908 " %s\n") % node[:12])
907 909 stats = mergemod.update(repo, node, branchmerge=False, force=True,
908 910 labels=labels)
909 911
910 912 _showstats(repo, stats)
911 913 if stats.unresolvedcount:
912 914 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
913 915 "or 'hg merge --abort' to abandon\n"))
914 916 elif remind and not abort:
915 917 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
916 918 return stats.unresolvedcount > 0
917 919
918 920 def _incoming(displaychlist, subreporecurse, ui, repo, source,
919 921 opts, buffered=False):
920 922 """
921 923 Helper for incoming / gincoming.
922 924 displaychlist gets called with
923 925 (remoterepo, incomingchangesetlist, displayer) parameters,
924 926 and is supposed to contain only code that can't be unified.
925 927 """
926 928 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
927 929 other = peer(repo, opts, source)
928 930 ui.status(_('comparing with %s\n') % util.hidepassword(source))
929 931 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
930 932
931 933 if revs:
932 934 revs = [other.lookup(rev) for rev in revs]
933 935 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
934 936 revs, opts["bundle"], opts["force"])
935 937 try:
936 938 if not chlist:
937 939 ui.status(_("no changes found\n"))
938 940 return subreporecurse()
939 941 ui.pager('incoming')
940 942 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
941 943 buffered=buffered)
942 944 displaychlist(other, chlist, displayer)
943 945 displayer.close()
944 946 finally:
945 947 cleanupfn()
946 948 subreporecurse()
947 949 return 0 # exit code is zero since we found incoming changes
948 950
949 951 def incoming(ui, repo, source, opts):
950 952 def subreporecurse():
951 953 ret = 1
952 954 if opts.get('subrepos'):
953 955 ctx = repo[None]
954 956 for subpath in sorted(ctx.substate):
955 957 sub = ctx.sub(subpath)
956 958 ret = min(ret, sub.incoming(ui, source, opts))
957 959 return ret
958 960
959 961 def display(other, chlist, displayer):
960 962 limit = logcmdutil.getlimit(opts)
961 963 if opts.get('newest_first'):
962 964 chlist.reverse()
963 965 count = 0
964 966 for n in chlist:
965 967 if limit is not None and count >= limit:
966 968 break
967 969 parents = [p for p in other.changelog.parents(n) if p != nullid]
968 970 if opts.get('no_merges') and len(parents) == 2:
969 971 continue
970 972 count += 1
971 973 displayer.show(other[n])
972 974 return _incoming(display, subreporecurse, ui, repo, source, opts)
973 975
974 976 def _outgoing(ui, repo, dest, opts):
975 977 path = ui.paths.getpath(dest, default=('default-push', 'default'))
976 978 if not path:
977 979 raise error.Abort(_('default repository not configured!'),
978 980 hint=_("see 'hg help config.paths'"))
979 981 dest = path.pushloc or path.loc
980 982 branches = path.branch, opts.get('branch') or []
981 983
982 984 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
983 985 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
984 986 if revs:
985 987 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
986 988
987 989 other = peer(repo, opts, dest)
988 990 outgoing = discovery.findcommonoutgoing(repo, other, revs,
989 991 force=opts.get('force'))
990 992 o = outgoing.missing
991 993 if not o:
992 994 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
993 995 return o, other
994 996
995 997 def outgoing(ui, repo, dest, opts):
996 998 def recurse():
997 999 ret = 1
998 1000 if opts.get('subrepos'):
999 1001 ctx = repo[None]
1000 1002 for subpath in sorted(ctx.substate):
1001 1003 sub = ctx.sub(subpath)
1002 1004 ret = min(ret, sub.outgoing(ui, dest, opts))
1003 1005 return ret
1004 1006
1005 1007 limit = logcmdutil.getlimit(opts)
1006 1008 o, other = _outgoing(ui, repo, dest, opts)
1007 1009 if not o:
1008 1010 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1009 1011 return recurse()
1010 1012
1011 1013 if opts.get('newest_first'):
1012 1014 o.reverse()
1013 1015 ui.pager('outgoing')
1014 1016 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1015 1017 count = 0
1016 1018 for n in o:
1017 1019 if limit is not None and count >= limit:
1018 1020 break
1019 1021 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1020 1022 if opts.get('no_merges') and len(parents) == 2:
1021 1023 continue
1022 1024 count += 1
1023 1025 displayer.show(repo[n])
1024 1026 displayer.close()
1025 1027 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1026 1028 recurse()
1027 1029 return 0 # exit code is zero since we found outgoing changes
1028 1030
1029 1031 def verify(repo):
1030 1032 """verify the consistency of a repository"""
1031 1033 ret = verifymod.verify(repo)
1032 1034
1033 1035 # Broken subrepo references in hidden csets don't seem worth worrying about,
1034 1036 # since they can't be pushed/pulled, and --hidden can be used if they are a
1035 1037 # concern.
1036 1038
1037 1039 # pathto() is needed for -R case
1038 1040 revs = repo.revs("filelog(%s)",
1039 1041 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1040 1042
1041 1043 if revs:
1042 1044 repo.ui.status(_('checking subrepo links\n'))
1043 1045 for rev in revs:
1044 1046 ctx = repo[rev]
1045 1047 try:
1046 1048 for subpath in ctx.substate:
1047 1049 try:
1048 1050 ret = (ctx.sub(subpath, allowcreate=False).verify()
1049 1051 or ret)
1050 1052 except error.RepoError as e:
1051 1053 repo.ui.warn(('%d: %s\n') % (rev, e))
1052 1054 except Exception:
1053 1055 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1054 1056 node.short(ctx.node()))
1055 1057
1056 1058 return ret
1057 1059
1058 1060 def remoteui(src, opts):
1059 1061 'build a remote ui from ui or repo and opts'
1060 1062 if util.safehasattr(src, 'baseui'): # looks like a repository
1061 1063 dst = src.baseui.copy() # drop repo-specific config
1062 1064 src = src.ui # copy target options from repo
1063 1065 else: # assume it's a global ui object
1064 1066 dst = src.copy() # keep all global options
1065 1067
1066 1068 # copy ssh-specific options
1067 1069 for o in 'ssh', 'remotecmd':
1068 1070 v = opts.get(o) or src.config('ui', o)
1069 1071 if v:
1070 1072 dst.setconfig("ui", o, v, 'copied')
1071 1073
1072 1074 # copy bundle-specific options
1073 1075 r = src.config('bundle', 'mainreporoot')
1074 1076 if r:
1075 1077 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1076 1078
1077 1079 # copy selected local settings to the remote ui
1078 1080 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1079 1081 for key, val in src.configitems(sect):
1080 1082 dst.setconfig(sect, key, val, 'copied')
1081 1083 v = src.config('web', 'cacerts')
1082 1084 if v:
1083 1085 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1084 1086
1085 1087 return dst
1086 1088
1087 1089 # Files of interest
1088 1090 # Used to check if the repository has changed looking at mtime and size of
1089 1091 # these files.
1090 1092 foi = [('spath', '00changelog.i'),
1091 1093 ('spath', 'phaseroots'), # ! phase can change content at the same size
1092 1094 ('spath', 'obsstore'),
1093 1095 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1094 1096 ]
1095 1097
1096 1098 class cachedlocalrepo(object):
1097 1099 """Holds a localrepository that can be cached and reused."""
1098 1100
1099 1101 def __init__(self, repo):
1100 1102 """Create a new cached repo from an existing repo.
1101 1103
1102 1104 We assume the passed in repo was recently created. If the
1103 1105 repo has changed between when it was created and when it was
1104 1106 turned into a cache, it may not refresh properly.
1105 1107 """
1106 1108 assert isinstance(repo, localrepo.localrepository)
1107 1109 self._repo = repo
1108 1110 self._state, self.mtime = self._repostate()
1109 1111 self._filtername = repo.filtername
1110 1112
1111 1113 def fetch(self):
1112 1114 """Refresh (if necessary) and return a repository.
1113 1115
1114 1116 If the cached instance is out of date, it will be recreated
1115 1117 automatically and returned.
1116 1118
1117 1119 Returns a tuple of the repo and a boolean indicating whether a new
1118 1120 repo instance was created.
1119 1121 """
1120 1122 # We compare the mtimes and sizes of some well-known files to
1121 1123 # determine if the repo changed. This is not precise, as mtimes
1122 1124 # are susceptible to clock skew and imprecise filesystems and
1123 1125 # file content can change while maintaining the same size.
1124 1126
1125 1127 state, mtime = self._repostate()
1126 1128 if state == self._state:
1127 1129 return self._repo, False
1128 1130
1129 1131 repo = repository(self._repo.baseui, self._repo.url())
1130 1132 if self._filtername:
1131 1133 self._repo = repo.filtered(self._filtername)
1132 1134 else:
1133 1135 self._repo = repo.unfiltered()
1134 1136 self._state = state
1135 1137 self.mtime = mtime
1136 1138
1137 1139 return self._repo, True
1138 1140
1139 1141 def _repostate(self):
1140 1142 state = []
1141 1143 maxmtime = -1
1142 1144 for attr, fname in foi:
1143 1145 prefix = getattr(self._repo, attr)
1144 1146 p = os.path.join(prefix, fname)
1145 1147 try:
1146 1148 st = os.stat(p)
1147 1149 except OSError:
1148 1150 st = os.stat(prefix)
1149 1151 state.append((st[stat.ST_MTIME], st.st_size))
1150 1152 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1151 1153
1152 1154 return tuple(state), maxmtime
1153 1155
1154 1156 def copy(self):
1155 1157 """Obtain a copy of this class instance.
1156 1158
1157 1159 A new localrepository instance is obtained. The new instance should be
1158 1160 completely independent of the original.
1159 1161 """
1160 1162 repo = repository(self._repo.baseui, self._repo.origroot)
1161 1163 if self._filtername:
1162 1164 repo = repo.filtered(self._filtername)
1163 1165 else:
1164 1166 repo = repo.unfiltered()
1165 1167 c = cachedlocalrepo(repo)
1166 1168 c._state = self._state
1167 1169 c.mtime = self.mtime
1168 1170 return c
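
A brief usage sketch for the cache class above (the path is illustrative):

    cached = cachedlocalrepo(repository(ui, b'/path/to/repo'))
    # later: the repo is recreated only if one of the files of interest
    # (00changelog.i, phaseroots, obsstore, bookmarks) changed mtime/size
    repo, recreated = cached.fetch()
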
@@ -1,1010 +1,1010 b''
1 1 # httppeer.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import io
13 13 import os
14 14 import socket
15 15 import struct
16 16 import sys
17 17 import tempfile
18 18 import weakref
19 19
20 20 from .i18n import _
21 21 from .thirdparty import (
22 22 cbor,
23 23 )
24 24 from .thirdparty.zope import (
25 25 interface as zi,
26 26 )
27 27 from . import (
28 28 bundle2,
29 29 error,
30 30 httpconnection,
31 31 pycompat,
32 32 repository,
33 33 statichttprepo,
34 34 url as urlmod,
35 35 util,
36 36 wireprotoframing,
37 37 wireprototypes,
38 38 wireprotov1peer,
39 39 wireprotov2server,
40 40 )
41 41
42 42 httplib = util.httplib
43 43 urlerr = util.urlerr
44 44 urlreq = util.urlreq
45 45
46 46 def encodevalueinheaders(value, header, limit):
47 47 """Encode a string value into multiple HTTP headers.
48 48
49 49 ``value`` will be encoded into 1 or more HTTP headers with the names
50 50 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
51 51 name + value will be at most ``limit`` bytes long.
52 52
53 53 Returns an iterable of 2-tuples consisting of header names and
54 54 values as native strings.
55 55 """
56 56 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
57 57 # not bytes. This function always takes bytes in as arguments.
58 58 fmt = pycompat.strurl(header) + r'-%s'
59 59 # Note: it is *NOT* a bug that the last bit here is a bytestring
60 60 # and not a unicode: we're just getting the encoded length anyway,
61 61 # and using an r-string to make it portable between Python 2 and 3
62 62 # doesn't work because then the \r is a literal backslash-r
63 63 # instead of a carriage return.
64 64 valuelen = limit - len(fmt % r'000') - len(': \r\n')
65 65 result = []
66 66
67 67 n = 0
68 68 for i in xrange(0, len(value), valuelen):
69 69 n += 1
70 70 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
71 71
72 72 return result
73 73
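
An illustration of the splitting (the 100-byte limit is made up):

    # a 1000-byte encoded-args string split into X-HgArg-1..N headers,
    # each full "name: value" line kept within the given byte budget
    headers = encodevalueinheaders(b'x' * 1000, b'X-HgArg', 100)
    # -> [('X-HgArg-1', 'xxx...'), ('X-HgArg-2', 'xxx...'), ...]
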
74 74 def _wraphttpresponse(resp):
75 75 """Wrap an HTTPResponse with common error handlers.
76 76
77 77 This ensures that any I/O from any consumer raises the appropriate
78 78 error and messaging.
79 79 """
80 80 origread = resp.read
81 81
82 82 class readerproxy(resp.__class__):
83 83 def read(self, size=None):
84 84 try:
85 85 return origread(size)
86 86 except httplib.IncompleteRead as e:
87 87 # e.expected is an integer if length known or None otherwise.
88 88 if e.expected:
89 89 msg = _('HTTP request error (incomplete response; '
90 90 'expected %d bytes got %d)') % (e.expected,
91 91 len(e.partial))
92 92 else:
93 93 msg = _('HTTP request error (incomplete response)')
94 94
95 95 raise error.PeerTransportError(
96 96 msg,
97 97 hint=_('this may be an intermittent network failure; '
98 98 'if the error persists, consider contacting the '
99 99 'network or server operator'))
100 100 except httplib.HTTPException as e:
101 101 raise error.PeerTransportError(
102 102 _('HTTP request error (%s)') % e,
103 103 hint=_('this may be an intermittent network failure; '
104 104 'if the error persists, consider contacting the '
105 105 'network or server operator'))
106 106
107 107 resp.__class__ = readerproxy
108 108
109 109 class _multifile(object):
110 110 def __init__(self, *fileobjs):
111 111 for f in fileobjs:
112 112 if not util.safehasattr(f, 'length'):
113 113 raise ValueError(
114 114 '_multifile only supports file objects that '
115 115 'have a length but this one does not:', type(f), f)
116 116 self._fileobjs = fileobjs
117 117 self._index = 0
118 118
119 119 @property
120 120 def length(self):
121 121 return sum(f.length for f in self._fileobjs)
122 122
123 123 def read(self, amt=None):
124 124 if amt <= 0:
125 125 return ''.join(f.read() for f in self._fileobjs)
126 126 parts = []
127 127 while amt and self._index < len(self._fileobjs):
128 128 parts.append(self._fileobjs[self._index].read(amt))
129 129 got = len(parts[-1])
130 130 if got < amt:
131 131 self._index += 1
132 132 amt -= got
133 133 return ''.join(parts)
134 134
135 135 def seek(self, offset, whence=os.SEEK_SET):
136 136 if whence != os.SEEK_SET:
137 137 raise NotImplementedError(
138 138 '_multifile does not support anything other'
139 139 ' than os.SEEK_SET for whence on seek()')
140 140 if offset != 0:
141 141 raise NotImplementedError(
142 142 '_multifile only supports seeking to start, but that '
143 143 'could be fixed if you need it')
144 144 for f in self._fileobjs:
145 145 f.seek(0)
146 146 self._index = 0
147 147
148 148 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
149 149 repobaseurl, cmd, args):
150 150 """Make an HTTP request to run a command for a version 1 client.
151 151
152 152 ``caps`` is a set of known server capabilities. The value may be
153 153 None if capabilities are not yet known.
154 154
155 155 ``capablefn`` is a function to evaluate a capability.
156 156
157 157 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
158 158 raw data to pass to it.
159 159 """
160 160 if cmd == 'pushkey':
161 161 args['data'] = ''
162 162 data = args.pop('data', None)
163 163 headers = args.pop('headers', {})
164 164
165 165 ui.debug("sending %s command\n" % cmd)
166 166 q = [('cmd', cmd)]
167 167 headersize = 0
168 168 # Important: don't use self.capable() here or else you end up
169 169 # with infinite recursion when trying to look up capabilities
170 170 # for the first time.
171 171 postargsok = caps is not None and 'httppostargs' in caps
172 172
173 173 # Send arguments via POST.
174 174 if postargsok and args:
175 175 strargs = urlreq.urlencode(sorted(args.items()))
176 176 if not data:
177 177 data = strargs
178 178 else:
179 179 if isinstance(data, bytes):
180 180 i = io.BytesIO(data)
181 181 i.length = len(data)
182 182 data = i
183 183 argsio = io.BytesIO(strargs)
184 184 argsio.length = len(strargs)
185 185 data = _multifile(argsio, data)
186 186 headers[r'X-HgArgs-Post'] = len(strargs)
187 187 elif args:
188 188 # Calling self.capable() can infinite loop if we are calling
189 189 # "capabilities". But that command should never accept wire
190 190 # protocol arguments. So this should never happen.
191 191 assert cmd != 'capabilities'
192 192 httpheader = capablefn('httpheader')
193 193 if httpheader:
194 194 headersize = int(httpheader.split(',', 1)[0])
195 195
196 196 # Send arguments via HTTP headers.
197 197 if headersize > 0:
198 198 # The headers can typically carry more data than the URL.
199 199 encargs = urlreq.urlencode(sorted(args.items()))
200 200 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
201 201 headersize):
202 202 headers[header] = value
203 203 # Send arguments via query string (Mercurial <1.9).
204 204 else:
205 205 q += sorted(args.items())
206 206
207 207 qs = '?%s' % urlreq.urlencode(q)
208 208 cu = "%s%s" % (repobaseurl, qs)
209 209 size = 0
210 210 if util.safehasattr(data, 'length'):
211 211 size = data.length
212 212 elif data is not None:
213 213 size = len(data)
214 214 if data is not None and r'Content-Type' not in headers:
215 215 headers[r'Content-Type'] = r'application/mercurial-0.1'
216 216
217 217 # Tell the server we accept application/mercurial-0.2 and multiple
218 218 # compression formats if the server is capable of emitting those
219 219 # payloads.
220 220 # Note: Keep this set empty by default, as client advertisement of
221 221 # protocol parameters should only occur after the handshake.
222 222 protoparams = set()
223 223
224 224 mediatypes = set()
225 225 if caps is not None:
226 226 mt = capablefn('httpmediatype')
227 227 if mt:
228 228 protoparams.add('0.1')
229 229 mediatypes = set(mt.split(','))
230 230
231 231 protoparams.add('partial-pull')
232 232
233 233 if '0.2tx' in mediatypes:
234 234 protoparams.add('0.2')
235 235
236 236 if '0.2tx' in mediatypes and capablefn('compression'):
237 237 # We /could/ compare supported compression formats and prune
238 238 # non-mutually supported or error if nothing is mutually supported.
239 239 # For now, send the full list to the server and have it error.
240 240 comps = [e.wireprotosupport().name for e in
241 241 util.compengines.supportedwireengines(util.CLIENTROLE)]
242 242 protoparams.add('comp=%s' % ','.join(comps))
243 243
244 244 if protoparams:
245 245 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
246 246 'X-HgProto',
247 247 headersize or 1024)
248 248 for header, value in protoheaders:
249 249 headers[header] = value
250 250
251 251 varyheaders = []
252 252 for header in headers:
253 253 if header.lower().startswith(r'x-hg'):
254 254 varyheaders.append(header)
255 255
256 256 if varyheaders:
257 257 headers[r'Vary'] = r','.join(sorted(varyheaders))
258 258
259 259 req = requestbuilder(pycompat.strurl(cu), data, headers)
260 260
261 261 if data is not None:
262 262 ui.debug("sending %d bytes\n" % size)
263 263 req.add_unredirected_header(r'Content-Length', r'%d' % size)
264 264
265 265 return req, cu, qs
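# Illustration, not part of this change: a minimal sketch of how a long
# urlencoded argument string is spread across numbered X-HgArg-<N>
# headers, in the spirit of encodevalueinheaders() used above. The
# helper name and the fixed chunk size are assumptions for the example.
def _sketch_argsinheaders(encargs, limit):
    n = 1
    for i in range(0, len(encargs), limit):
        yield ('X-HgArg-%d' % n, encargs[i:i + limit])
        n += 1

# list(_sketch_argsinheaders('cmd=branchmap&key=tip', 8)) yields
# [('X-HgArg-1', 'cmd=bran'), ('X-HgArg-2', 'chmap&ke'), ('X-HgArg-3', 'y=tip')]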
266 266
267 267 def sendrequest(ui, opener, req):
268 268 """Send a prepared HTTP request.
269 269
270 270 Returns the response object.
271 271 """
272 272 if (ui.debugflag
273 273 and ui.configbool('devel', 'debug.peer-request')):
274 274 dbg = ui.debug
275 275 line = 'devel-peer-request: %s\n'
276 276 dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
277 277 hgargssize = None
278 278
279 279 for header, value in sorted(req.header_items()):
280 280 if header.startswith('X-hgarg-'):
281 281 if hgargssize is None:
282 282 hgargssize = 0
283 283 hgargssize += len(value)
284 284 else:
285 285 dbg(line % ' %s %s' % (header, value))
286 286
287 287 if hgargssize is not None:
288 288 dbg(line % ' %d bytes of commands arguments in headers'
289 289 % hgargssize)
290 290
291 291 if req.has_data():
292 292 data = req.get_data()
293 293 length = getattr(data, 'length', None)
294 294 if length is None:
295 295 length = len(data)
296 296 dbg(line % ' %d bytes of data' % length)
297 297
298 298 start = util.timer()
299 299
300 300 try:
301 301 res = opener.open(req)
302 302 except urlerr.httperror as inst:
303 303 if inst.code == 401:
304 304 raise error.Abort(_('authorization failed'))
305 305 raise
306 306 except httplib.HTTPException as inst:
307 307 ui.debug('http error requesting %s\n' %
308 308 util.hidepassword(req.get_full_url()))
309 309 ui.traceback()
310 310 raise IOError(None, inst)
311 311 finally:
312 312 if ui.configbool('devel', 'debug.peer-request'):
313 313 dbg(line % ' finished in %.4f seconds (%s)'
314 314 % (util.timer() - start, res.code))
315 315
316 316 # Insert error handlers for common I/O failures.
317 317 _wraphttpresponse(res)
318 318
319 319 return res
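# Usage note, not part of this change: the tracing in sendrequest() is
# gated on both ui.debugflag and the devel config knob read above, so
# it can be enabled per invocation, e.g.:
#
#   $ hg pull --debug --config devel.debug.peer-request=yes
#
# which prints the request line, the request headers (with X-HgArg-*
# collapsed into a byte count), the payload size, and the elapsed time.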
320 320
321 321 def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
322 322 allowcbor=False):
323 323 # record the url we got redirected to
324 324 respurl = pycompat.bytesurl(resp.geturl())
325 325 if respurl.endswith(qs):
326 326 respurl = respurl[:-len(qs)]
327 327 if baseurl.rstrip('/') != respurl.rstrip('/'):
328 328 if not ui.quiet:
329 329 ui.warn(_('real URL is %s\n') % respurl)
330 330
331 331 try:
332 332 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
333 333 except AttributeError:
334 334 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
335 335
336 336 safeurl = util.hidepassword(baseurl)
337 337 if proto.startswith('application/hg-error'):
338 338 raise error.OutOfBandError(resp.read())
339 339
340 340 # Pre 1.0 versions of Mercurial used text/plain and
341 341 # application/hg-changegroup. We don't support such old servers.
342 342 if not proto.startswith('application/mercurial-'):
343 343 ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
344 344 raise error.RepoError(
345 345 _("'%s' does not appear to be an hg repository:\n"
346 346 "---%%<--- (%s)\n%s\n---%%<---\n")
347 347 % (safeurl, proto or 'no content-type', resp.read(1024)))
348 348
349 349 try:
350 350 subtype = proto.split('-', 1)[1]
351 351
352 352 # Unless we end up supporting CBOR in the legacy wire protocol,
353 353 # this should ONLY be encountered for the initial capabilities
354 354 # request during handshake.
355 355 if subtype == 'cbor':
356 356 if allowcbor:
357 357 return respurl, proto, resp
358 358 else:
359 359 raise error.RepoError(_('unexpected CBOR response from '
360 360 'server'))
361 361
362 362 version_info = tuple([int(n) for n in subtype.split('.')])
363 363 except ValueError:
364 364 raise error.RepoError(_("'%s' sent a broken Content-Type "
365 365 "header (%s)") % (safeurl, proto))
366 366
367 367 # TODO consider switching to a decompression reader that uses
368 368 # generators.
369 369 if version_info == (0, 1):
370 370 if compressible:
371 371 resp = util.compengines['zlib'].decompressorreader(resp)
372 372
373 373 elif version_info == (0, 2):
374 374 # application/mercurial-0.2 always identifies the compression
375 375 # engine in the payload header.
376 376 elen = struct.unpack('B', resp.read(1))[0]
377 377 ename = resp.read(elen)
378 378 engine = util.compengines.forwiretype(ename)
379 379
380 380 resp = engine.decompressorreader(resp)
381 381 else:
382 382 raise error.RepoError(_("'%s' uses newer protocol %s") %
383 383 (safeurl, subtype))
384 384
385 385 return respurl, proto, resp
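# Illustration, not part of this change: an application/mercurial-0.2
# payload starts with a one-byte length followed by the compression
# engine name, which is exactly what the reader above unpacks. A
# minimal encoder for that header (the helper name is an assumption):
import struct

def _sketch_encode02header(ename):
    # _sketch_encode02header(b'zlib') -> b'\x04zlib'
    assert len(ename) < 256
    return struct.pack('B', len(ename)) + ename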
386 386
387 387 class httppeer(wireprotov1peer.wirepeer):
388 388 def __init__(self, ui, path, url, opener, requestbuilder, caps):
389 389 self.ui = ui
390 390 self._path = path
391 391 self._url = url
392 392 self._caps = caps
393 393 self._urlopener = opener
394 394 self._requestbuilder = requestbuilder
395 395
396 396 def __del__(self):
397 397 for h in self._urlopener.handlers:
398 398 h.close()
399 399 getattr(h, "close_all", lambda: None)()
400 400
401 401 # Begin of ipeerconnection interface.
402 402
403 403 def url(self):
404 404 return self._path
405 405
406 406 def local(self):
407 407 return None
408 408
409 409 def peer(self):
410 410 return self
411 411
412 412 def canpush(self):
413 413 return True
414 414
415 415 def close(self):
416 416 pass
417 417
418 418 # End of ipeerconnection interface.
419 419
420 420 # Begin of ipeercommands interface.
421 421
422 422 def capabilities(self):
423 423 return self._caps
424 424
425 425 # End of ipeercommands interface.
426 426
427 427 # look up capabilities only when needed
428 428
429 429 def _callstream(self, cmd, _compressible=False, **args):
430 430 args = pycompat.byteskwargs(args)
431 431
432 432 req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
433 433 self._caps, self.capable,
434 434 self._url, cmd, args)
435 435
436 436 resp = sendrequest(self.ui, self._urlopener, req)
437 437
438 438 self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
439 439 resp, _compressible)
440 440
441 441 return resp
442 442
443 443 def _call(self, cmd, **args):
444 444 fp = self._callstream(cmd, **args)
445 445 try:
446 446 return fp.read()
447 447 finally:
448 448 # if using keepalive, allow connection to be reused
449 449 fp.close()
450 450
451 451 def _callpush(self, cmd, cg, **args):
452 452 # have to stream the bundle to a temp file because we do not have
453 453 # HTTP 1.1 chunked transfer.
454 454
455 455 types = self.capable('unbundle')
456 456 try:
457 457 types = types.split(',')
458 458 except AttributeError:
459 459 # servers older than d1b16a746db6 will send 'unbundle' as a
460 460 # boolean capability. They only support headerless/uncompressed
461 461 # bundles.
462 462 types = [""]
463 463 for x in types:
464 464 if x in bundle2.bundletypes:
465 465 type = x
466 466 break
467 467
468 468 tempname = bundle2.writebundle(self.ui, cg, None, type)
469 469 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
470 470 headers = {r'Content-Type': r'application/mercurial-0.1'}
471 471
472 472 try:
473 473 r = self._call(cmd, data=fp, headers=headers, **args)
474 474 vals = r.split('\n', 1)
475 475 if len(vals) < 2:
476 476 raise error.ResponseError(_("unexpected response:"), r)
477 477 return vals
478 478 except urlerr.httperror:
479 479 # Catch and re-raise these so we don't try and treat them
480 480 # like generic socket errors. They lack any values in
481 481 # .args on Python 3 which breaks our socket.error block.
482 482 raise
483 483 except socket.error as err:
484 484 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
485 485 raise error.Abort(_('push failed: %s') % err.args[1])
486 486 raise error.Abort(err.args[1])
487 487 finally:
488 488 fp.close()
489 489 os.unlink(tempname)
490 490
491 491 def _calltwowaystream(self, cmd, fp, **args):
492 492 fh = None
493 493 fp_ = None
494 494 filename = None
495 495 try:
496 496 # dump bundle to disk
497 497 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
498 498 fh = os.fdopen(fd, r"wb")
499 499 d = fp.read(4096)
500 500 while d:
501 501 fh.write(d)
502 502 d = fp.read(4096)
503 503 fh.close()
504 504 # start http push
505 505 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
506 506 headers = {r'Content-Type': r'application/mercurial-0.1'}
507 507 return self._callstream(cmd, data=fp_, headers=headers, **args)
508 508 finally:
509 509 if fp_ is not None:
510 510 fp_.close()
511 511 if fh is not None:
512 512 fh.close()
513 513 os.unlink(filename)
514 514
515 515 def _callcompressable(self, cmd, **args):
516 516 return self._callstream(cmd, _compressible=True, **args)
517 517
518 518 def _abort(self, exception):
519 519 raise exception
520 520
521 521 def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
522 522 reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
523 523 buffersends=True)
524 524
525 525 url = '%s/%s' % (apiurl, permission)
526 526
527 527 if len(requests) > 1:
528 528 url += '/multirequest'
529 529 else:
530 530 url += '/%s' % requests[0][0]
531 531
532 532 # Request ID to (request, future)
533 533 requestmap = {}
534 534
535 535 for command, args, f in requests:
536 536 request, action, meta = reactor.callcommand(command, args)
537 537 assert action == 'noop'
538 538
539 539 requestmap[request.requestid] = (request, f)
540 540
541 541 action, meta = reactor.flushcommands()
542 542 assert action == 'sendframes'
543 543
544 544 # TODO stream this.
545 545 body = b''.join(map(bytes, meta['framegen']))
546 546
547 547 # TODO modify user-agent to reflect v2
548 548 headers = {
549 549 r'Accept': wireprotov2server.FRAMINGTYPE,
550 550 r'Content-Type': wireprotov2server.FRAMINGTYPE,
551 551 }
552 552
553 553 req = requestbuilder(pycompat.strurl(url), body, headers)
554 554 req.add_unredirected_header(r'Content-Length', r'%d' % len(body))
555 555
556 556 try:
557 557 res = opener.open(req)
558 558 except urlerr.httperror as e:
559 559 if e.code == 401:
560 560 raise error.Abort(_('authorization failed'))
561 561
562 562 raise
563 563 except httplib.HTTPException as e:
564 564 ui.traceback()
565 565 raise IOError(None, e)
566 566
567 567 return reactor, requestmap, res
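# Illustration, not part of this change: the version 2 URL layout
# constructed above, with the permission component between the API root
# and the command name:
#
#   one command:       <apiurl>/ro/<command>
#   several commands:  <apiurl>/rw/multirequest
#
# A tiny helper reproducing that routing (the name is an assumption):
def _sketch_v2url(apiurl, permission, commands):
    if len(commands) > 1:
        return '%s/%s/multirequest' % (apiurl, permission)
    return '%s/%s/%s' % (apiurl, permission, commands[0])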
568 568
569 569 class queuedcommandfuture(pycompat.futures.Future):
570 570 """Wraps result() on command futures to trigger submission on call."""
571 571
572 572 def result(self, timeout=None):
573 573 if self.done():
574 574 return pycompat.futures.Future.result(self, timeout)
575 575
576 576 self._peerexecutor.sendcommands()
577 577
578 578 # sendcommands() will restore the original __class__ and self.result
579 579 # will resolve to Future.result.
580 580 return self.result(timeout)
581 581
582 582 @zi.implementer(repository.ipeercommandexecutor)
583 583 class httpv2executor(object):
584 584 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
585 585 self._ui = ui
586 586 self._opener = opener
587 587 self._requestbuilder = requestbuilder
588 588 self._apiurl = apiurl
589 589 self._descriptor = descriptor
590 590 self._sent = False
591 591 self._closed = False
592 592 self._neededpermissions = set()
593 593 self._calls = []
594 594 self._futures = weakref.WeakSet()
595 595 self._responseexecutor = None
596 596 self._responsef = None
597 597
598 598 def __enter__(self):
599 599 return self
600 600
601 601 def __exit__(self, exctype, excvalue, exctb):
602 602 self.close()
603 603
604 604 def callcommand(self, command, args):
605 605 if self._sent:
606 606 raise error.ProgrammingError('callcommand() cannot be used after '
607 607 'commands are sent')
608 608
609 609 if self._closed:
610 610 raise error.ProgrammingError('callcommand() cannot be used after '
611 611 'close()')
612 612
613 613 # The service advertises which commands are available. So if we attempt
614 614 # to call an unknown command or pass an unknown argument, we can screen
615 615 # for this.
616 616 if command not in self._descriptor['commands']:
617 617 raise error.ProgrammingError(
618 618 'wire protocol command %s is not available' % command)
619 619
620 620 cmdinfo = self._descriptor['commands'][command]
621 621 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
622 622
623 623 if unknownargs:
624 624 raise error.ProgrammingError(
625 625 'wire protocol command %s does not accept argument: %s' % (
626 626 command, ', '.join(sorted(unknownargs))))
627 627
628 628 self._neededpermissions |= set(cmdinfo['permissions'])
629 629
630 630 # TODO we /could/ also validate types here, since the API descriptor
631 631 # includes types...
632 632
633 633 f = pycompat.futures.Future()
634 634
635 635 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
636 636 # could deadlock.
637 637 f.__class__ = queuedcommandfuture
638 638 f._peerexecutor = self
639 639
640 640 self._futures.add(f)
641 641 self._calls.append((command, args, f))
642 642
643 643 return f
644 644
645 645 def sendcommands(self):
646 646 if self._sent:
647 647 return
648 648
649 649 if not self._calls:
650 650 return
651 651
652 652 self._sent = True
653 653
654 654 # Unhack any future types so the caller sees a clean type and so we
655 655 # break the reference cycle.
656 656 for f in self._futures:
657 657 if isinstance(f, queuedcommandfuture):
658 658 f.__class__ = pycompat.futures.Future
659 659 f._peerexecutor = None
660 660
661 661 # Mark the future as running and filter out cancelled futures.
662 662 calls = [(command, args, f)
663 663 for command, args, f in self._calls
664 664 if f.set_running_or_notify_cancel()]
665 665
666 666 # Clear out references, prevent improper object usage.
667 667 self._calls = None
668 668
669 669 if not calls:
670 670 return
671 671
672 672 permissions = set(self._neededpermissions)
673 673
674 674 if 'push' in permissions and 'pull' in permissions:
675 675 permissions.remove('pull')
676 676
677 677 if len(permissions) > 1:
678 678 raise error.RepoError(_('cannot make request requiring multiple '
679 679 'permissions: %s') %
680 680 _(', ').join(sorted(permissions)))
681 681
682 682 permission = {
683 683 'push': 'rw',
684 684 'pull': 'ro',
685 685 }[permissions.pop()]
686 686
687 687 reactor, requests, resp = sendv2request(
688 688 self._ui, self._opener, self._requestbuilder, self._apiurl,
689 689 permission, calls)
690 690
691 691 # TODO we probably want to validate the HTTP code, media type, etc.
692 692
693 693 self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
694 694 self._responsef = self._responseexecutor.submit(self._handleresponse,
695 695 reactor,
696 696 requests,
697 697 resp)
698 698
699 699 def close(self):
700 700 if self._closed:
701 701 return
702 702
703 703 self.sendcommands()
704 704
705 705 self._closed = True
706 706
707 707 if not self._responsef:
708 708 return
709 709
710 710 try:
711 711 self._responsef.result()
712 712 finally:
713 713 self._responseexecutor.shutdown(wait=True)
714 714 self._responsef = None
715 715 self._responseexecutor = None
716 716
717 717 # If any of our futures are still in progress, mark them as
718 718 # errored, otherwise a result() could wait indefinitely.
719 719 for f in self._futures:
720 720 if not f.done():
721 721 f.set_exception(error.ResponseError(
722 722 _('unfulfilled command response')))
723 723
724 724 self._futures = None
725 725
726 726 def _handleresponse(self, reactor, requests, resp):
727 727 # Called in a thread to read the response.
728 728
729 729 results = {k: [] for k in requests}
730 730
731 731 while True:
732 732 frame = wireprotoframing.readframe(resp)
733 733 if frame is None:
734 734 break
735 735
736 736 self._ui.note(_('received %r\n') % frame)
737 737
738 738 # Guard against receiving a frame with a request ID that we
739 739 # didn't issue. This should never happen.
740 740 request, f = requests.get(frame.requestid, [None, None])
741 741
742 742 action, meta = reactor.onframerecv(frame)
743 743
744 744 if action == 'responsedata':
745 745 assert request.requestid == meta['request'].requestid
746 746
747 747 result = results[request.requestid]
748 748
749 749 if meta['cbor']:
750 750 payload = util.bytesio(meta['data'])
751 751
752 752 decoder = cbor.CBORDecoder(payload)
753 753 while payload.tell() + 1 < len(meta['data']):
754 754 try:
755 755 result.append(decoder.decode())
756 756 except Exception:
757 757 pycompat.future_set_exception_info(
758 758 f, sys.exc_info()[1:])
759 759 continue
760 760 else:
761 761 result.append(meta['data'])
762 762
763 763 if meta['eos']:
764 764 f.set_result(result)
765 765 del results[request.requestid]
766 766
767 767 elif action == 'error':
768 768 e = error.RepoError(meta['message'])
769 769
770 770 if f:
771 771 f.set_exception(e)
772 772 else:
773 773 raise e
774 774
775 775 else:
776 776 e = error.ProgrammingError('unhandled action: %s' % action)
777 777
778 778 if f:
779 779 f.set_exception(e)
780 780 else:
781 781 raise e
782 782
783 783 # TODO implement interface for version 2 peers
784 784 @zi.implementer(repository.ipeerconnection, repository.ipeercapabilities,
785 785 repository.ipeerrequests)
786 786 class httpv2peer(object):
787 787 def __init__(self, ui, repourl, apipath, opener, requestbuilder,
788 788 apidescriptor):
789 789 self.ui = ui
790 790
791 791 if repourl.endswith('/'):
792 792 repourl = repourl[:-1]
793 793
794 794 self._url = repourl
795 795 self._apipath = apipath
796 796 self._apiurl = '%s/%s' % (repourl, apipath)
797 797 self._opener = opener
798 798 self._requestbuilder = requestbuilder
799 799 self._descriptor = apidescriptor
800 800
801 801 # Start of ipeerconnection.
802 802
803 803 def url(self):
804 804 return self._url
805 805
806 806 def local(self):
807 807 return None
808 808
809 809 def peer(self):
810 810 return self
811 811
812 812 def canpush(self):
813 813 # TODO change once implemented.
814 814 return False
815 815
816 816 def close(self):
817 817 pass
818 818
819 819 # End of ipeerconnection.
820 820
821 821 # Start of ipeercapabilities.
822 822
823 823 def capable(self, name):
824 824 # The capabilities used internally historically map to capabilities
825 825 # advertised from the "capabilities" wire protocol command. However,
826 826 # version 2 of that command works differently.
827 827
828 828 # Maps to commands that are available.
829 829 if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
830 830 return True
831 831
832 832 # Other concepts.
833 833 if name in ('bundle2',):
834 834 return True
835 835
836 836 return False
837 837
838 838 def requirecap(self, name, purpose):
839 839 if self.capable(name):
840 840 return
841 841
842 842 raise error.CapabilityError(
843 843 _('cannot %s; client or remote repository does not support the %r '
844 844 'capability') % (purpose, name))
845 845
846 846 # End of ipeercapabilities.
847 847
848 848 def _call(self, name, **args):
849 849 with self.commandexecutor() as e:
850 850 return e.callcommand(name, args).result()
851 851
852 852 def commandexecutor(self):
853 853 return httpv2executor(self.ui, self._opener, self._requestbuilder,
854 854 self._apiurl, self._descriptor)
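# Illustration, not part of this change: how a caller drives the
# executor returned above, mirroring _call(); the peer object and the
# command name are assumptions for the example:
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand('heads', {})
#       # result() triggers sendcommands() via queuedcommandfuture
#       heads = f.result()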
855 855
856 856 # Registry of API service names to metadata about peers that handle it.
857 857 #
858 858 # The following keys are meaningful:
859 859 #
860 860 # init
861 861 # Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
862 862 # apidescriptor) to create a peer.
863 863 #
864 864 # priority
865 865 # Integer priority for the service. If we could choose from multiple
866 866 # services, we choose the one with the highest priority.
867 867 API_PEERS = {
868 868 wireprototypes.HTTP_WIREPROTO_V2: {
869 869 'init': httpv2peer,
870 870 'priority': 50,
871 871 },
872 872 }
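# Illustration, not part of this change: a hypothetical extension could
# register an extra service here; makepeer() below then instantiates
# the highest-priority service both sides support:
#
#   API_PEERS['exp-custom-v1'] = {    # hypothetical service name
#       'init': makecustompeer,       # hypothetical peer factory
#       'priority': 100,              # would beat the default 50
#   }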
873 873
874 874 def performhandshake(ui, url, opener, requestbuilder):
875 875 # The handshake is a request to the capabilities command.
876 876
877 877 caps = None
878 878 def capable(x):
879 879 raise error.ProgrammingError('should not be called')
880 880
881 881 args = {}
882 882
883 883 # The client advertises support for newer protocols by adding an
884 884 # X-HgUpgrade-* header with a list of supported APIs and an
885 885 # X-HgProto-* header advertising which serializing formats it supports.
886 886 # We only support the HTTP version 2 transport and CBOR responses for
887 887 # now.
888 888 advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')
889 889
890 890 if advertisev2:
891 891 args['headers'] = {
892 892 r'X-HgProto-1': r'cbor',
893 893 }
894 894
895 895 args['headers'].update(
896 896 encodevalueinheaders(' '.join(sorted(API_PEERS)),
897 897 'X-HgUpgrade',
898 898 # We don't know the header limit this early.
899 899 # So make it small.
900 900 1024))
901 901
902 902 req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
903 903 capable, url, 'capabilities',
904 904 args)
905 905
906 906 resp = sendrequest(ui, opener, req)
907 907
908 908 respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
909 909 compressible=False,
910 910 allowcbor=advertisev2)
911 911
912 912 try:
913 913 rawdata = resp.read()
914 914 finally:
915 915 resp.close()
916 916
917 917 if not ct.startswith('application/mercurial-'):
918 918 raise error.ProgrammingError('unexpected content-type: %s' % ct)
919 919
920 920 if advertisev2:
921 921 if ct == 'application/mercurial-cbor':
922 922 try:
923 923 info = cbor.loads(rawdata)
924 924 except cbor.CBORDecodeError:
925 925 raise error.Abort(_('error decoding CBOR from remote server'),
926 926 hint=_('try again and consider contacting '
927 927 'the server operator'))
928 928
929 929 # We got a legacy response. That's fine.
930 930 elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
931 931 info = {
932 932 'v1capabilities': set(rawdata.split())
933 933 }
934 934
935 935 else:
936 936 raise error.RepoError(
937 937 _('unexpected response type from server: %s') % ct)
938 938 else:
939 939 info = {
940 940 'v1capabilities': set(rawdata.split())
941 941 }
942 942
943 943 return respurl, info
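# Illustration, not part of this change: the shape of the ``info``
# mapping returned above, as consumed by makepeer() below. The nested
# contents of 'apis' are server-defined; the values here are
# placeholders:
#
#   {
#       'v1capabilities': {'lookup', 'branchmap', ...},
#       # only present when the server answered the upgrade request:
#       'apibase': 'api/',
#       'apis': {'exp-http-v2-0001': {...}},
#   }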
944 944
945 945 def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
946 946 """Construct an appropriate HTTP peer instance.
947 947
948 948 ``opener`` is an ``url.opener`` that should be used to establish
949 949 connections and perform HTTP requests.
950 950
951 951 ``requestbuilder`` is the type used for constructing HTTP requests.
952 952 It exists as an argument so extensions can override the default.
953 953 """
954 954 u = util.url(path)
955 955 if u.query or u.fragment:
956 956 raise error.Abort(_('unsupported URL component: "%s"') %
957 957 (u.query or u.fragment))
958 958
959 959 # urllib cannot handle URLs with embedded user or passwd.
960 960 url, authinfo = u.authinfo()
961 961 ui.debug('using %s\n' % url)
962 962
963 963 opener = opener or urlmod.opener(ui, authinfo)
964 964
965 965 respurl, info = performhandshake(ui, url, opener, requestbuilder)
966 966
967 967 # Given the intersection of APIs that both we and the server support,
968 968 # sort by their advertised priority and pick the first one.
969 969 #
970 970 # TODO consider making this request-based and interface driven. For
971 971 # example, the caller could say "I want a peer that does X." It's quite
972 972 # possible that not all peers would do that. Since we know the service
973 973 # capabilities, we could filter out services not meeting the
974 974 # requirements. Possibly by consulting the interfaces defined by the
975 975 # peer type.
976 976 apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())
977 977
978 978 preferredchoices = sorted(apipeerchoices,
979 979 key=lambda x: API_PEERS[x]['priority'],
980 980 reverse=True)
981 981
982 982 for service in preferredchoices:
983 983 apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)
984 984
985 985 return API_PEERS[service]['init'](ui, respurl, apipath, opener,
986 986 requestbuilder,
987 987 info['apis'][service])
988 988
989 989 # Failed to construct an API peer. Fall back to legacy.
990 990 return httppeer(ui, path, respurl, opener, requestbuilder,
991 991 info['v1capabilities'])
992 992
993 def instance(ui, path, create):
993 def instance(ui, path, create, intents=None):
994 994 if create:
995 995 raise error.Abort(_('cannot create new http repository'))
996 996 try:
997 997 if path.startswith('https:') and not urlmod.has_https:
998 998 raise error.Abort(_('Python support for SSL and HTTPS '
999 999 'is not installed'))
1000 1000
1001 1001 inst = makepeer(ui, path)
1002 1002
1003 1003 return inst
1004 1004 except error.RepoError as httpexception:
1005 1005 try:
1006 1006 r = statichttprepo.instance(ui, "static-" + path, create)
1007 1007 ui.note(_('(falling back to static-http)\n'))
1008 1008 return r
1009 1009 except error.RepoError:
1010 1010 raise httpexception # use the original http RepoError instead
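# Usage note, not part of this change: the fallback above reuses the
# static-http machinery, the same code path as an explicit URL such as:
#
#   $ hg clone static-http://example.com/repo
#
# where the "repository" is a plain .hg file tree served over HTTP.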
@@ -1,2380 +1,2381 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from .thirdparty.zope import (
25 25 interface as zi,
26 26 )
27 27 from . import (
28 28 bookmarks,
29 29 branchmap,
30 30 bundle2,
31 31 changegroup,
32 32 changelog,
33 33 color,
34 34 context,
35 35 dirstate,
36 36 dirstateguard,
37 37 discovery,
38 38 encoding,
39 39 error,
40 40 exchange,
41 41 extensions,
42 42 filelog,
43 43 hook,
44 44 lock as lockmod,
45 45 manifest,
46 46 match as matchmod,
47 47 merge as mergemod,
48 48 mergeutil,
49 49 namespaces,
50 50 narrowspec,
51 51 obsolete,
52 52 pathutil,
53 53 phases,
54 54 pushkey,
55 55 pycompat,
56 56 repository,
57 57 repoview,
58 58 revset,
59 59 revsetlang,
60 60 scmutil,
61 61 sparse,
62 62 store,
63 63 subrepoutil,
64 64 tags as tagsmod,
65 65 transaction,
66 66 txnutil,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70 from .utils import (
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 release = lockmod.release
76 76 urlerr = util.urlerr
77 77 urlreq = util.urlreq
78 78
79 79 # set of (path, vfs-location) tuples. vfs-location is:
80 80 # - 'plain' for vfs relative paths
81 81 # - '' for svfs relative paths
82 82 _cachedfiles = set()
83 83
84 84 class _basefilecache(scmutil.filecache):
85 85 """All filecache usage on repo are done for logic that should be unfiltered
86 86 """
87 87 def __get__(self, repo, type=None):
88 88 if repo is None:
89 89 return self
90 90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 91 def __set__(self, repo, value):
92 92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 93 def __delete__(self, repo):
94 94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95 95
96 96 class repofilecache(_basefilecache):
97 97 """filecache for files in .hg but outside of .hg/store"""
98 98 def __init__(self, *paths):
99 99 super(repofilecache, self).__init__(*paths)
100 100 for path in paths:
101 101 _cachedfiles.add((path, 'plain'))
102 102
103 103 def join(self, obj, fname):
104 104 return obj.vfs.join(fname)
105 105
106 106 class storecache(_basefilecache):
107 107 """filecache for files in the store"""
108 108 def __init__(self, *paths):
109 109 super(storecache, self).__init__(*paths)
110 110 for path in paths:
111 111 _cachedfiles.add((path, ''))
112 112
113 113 def join(self, obj, fname):
114 114 return obj.sjoin(fname)
115 115
116 116 def isfilecached(repo, name):
117 117 """check if a repo has already cached "name" filecache-ed property
118 118
119 119 This returns a (cachedobj-or-None, iscached) tuple.
120 120 """
121 121 cacheentry = repo.unfiltered()._filecache.get(name, None)
122 122 if not cacheentry:
123 123 return None, False
124 124 return cacheentry.obj, True
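# Illustration, not part of this change: how the decorators above are
# used later in this module, e.g. for the bookmarks store; the property
# is recomputed only when one of the named files under .hg changes:
#
#   @repofilecache('bookmarks', 'bookmarks.current')
#   def _bookmarks(self):
#       return bookmarks.bmstore(self)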
125 125
126 126 class unfilteredpropertycache(util.propertycache):
127 127 """propertycache that apply to unfiltered repo only"""
128 128
129 129 def __get__(self, repo, type=None):
130 130 unfi = repo.unfiltered()
131 131 if unfi is repo:
132 132 return super(unfilteredpropertycache, self).__get__(unfi)
133 133 return getattr(unfi, self.name)
134 134
135 135 class filteredpropertycache(util.propertycache):
136 136 """propertycache that must take filtering in account"""
137 137
138 138 def cachevalue(self, obj, value):
139 139 object.__setattr__(obj, self.name, value)
140 140
141 141
142 142 def hasunfilteredcache(repo, name):
143 143 """check if a repo has an unfilteredpropertycache value for <name>"""
144 144 return name in vars(repo.unfiltered())
145 145
146 146 def unfilteredmethod(orig):
147 147 """decorate method that always need to be run on unfiltered version"""
148 148 def wrapper(repo, *args, **kwargs):
149 149 return orig(repo.unfiltered(), *args, **kwargs)
150 150 return wrapper
151 151
152 152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
153 153 'unbundle'}
154 154 legacycaps = moderncaps.union({'changegroupsubset'})
155 155
156 156 @zi.implementer(repository.ipeercommandexecutor)
157 157 class localcommandexecutor(object):
158 158 def __init__(self, peer):
159 159 self._peer = peer
160 160 self._sent = False
161 161 self._closed = False
162 162
163 163 def __enter__(self):
164 164 return self
165 165
166 166 def __exit__(self, exctype, excvalue, exctb):
167 167 self.close()
168 168
169 169 def callcommand(self, command, args):
170 170 if self._sent:
171 171 raise error.ProgrammingError('callcommand() cannot be used after '
172 172 'sendcommands()')
173 173
174 174 if self._closed:
175 175 raise error.ProgrammingError('callcommand() cannot be used after '
176 176 'close()')
177 177
178 178 # We don't need to support anything fancy. Just call the named
179 179 # method on the peer and return a resolved future.
180 180 fn = getattr(self._peer, pycompat.sysstr(command))
181 181
182 182 f = pycompat.futures.Future()
183 183
184 184 try:
185 185 result = fn(**pycompat.strkwargs(args))
186 186 except Exception:
187 187 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
188 188 else:
189 189 f.set_result(result)
190 190
191 191 return f
192 192
193 193 def sendcommands(self):
194 194 self._sent = True
195 195
196 196 def close(self):
197 197 self._closed = True
198 198
199 199 @zi.implementer(repository.ipeercommands)
200 200 class localpeer(repository.peer):
201 201 '''peer for a local repo; reflects only the most recent API'''
202 202
203 203 def __init__(self, repo, caps=None):
204 204 super(localpeer, self).__init__()
205 205
206 206 if caps is None:
207 207 caps = moderncaps.copy()
208 208 self._repo = repo.filtered('served')
209 209 self.ui = repo.ui
210 210 self._caps = repo._restrictcapabilities(caps)
211 211
212 212 # Begin of _basepeer interface.
213 213
214 214 def url(self):
215 215 return self._repo.url()
216 216
217 217 def local(self):
218 218 return self._repo
219 219
220 220 def peer(self):
221 221 return self
222 222
223 223 def canpush(self):
224 224 return True
225 225
226 226 def close(self):
227 227 self._repo.close()
228 228
229 229 # End of _basepeer interface.
230 230
231 231 # Begin of _basewirecommands interface.
232 232
233 233 def branchmap(self):
234 234 return self._repo.branchmap()
235 235
236 236 def capabilities(self):
237 237 return self._caps
238 238
239 239 def clonebundles(self):
240 240 return self._repo.tryread('clonebundles.manifest')
241 241
242 242 def debugwireargs(self, one, two, three=None, four=None, five=None):
243 243 """Used to test argument passing over the wire"""
244 244 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
245 245 pycompat.bytestr(four),
246 246 pycompat.bytestr(five))
247 247
248 248 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
249 249 **kwargs):
250 250 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
251 251 common=common, bundlecaps=bundlecaps,
252 252 **kwargs)[1]
253 253 cb = util.chunkbuffer(chunks)
254 254
255 255 if exchange.bundle2requested(bundlecaps):
256 256 # When requesting a bundle2, getbundle returns a stream to make the
257 257 # wire level function happier. We need to build a proper object
258 258 # from it in local peer.
259 259 return bundle2.getunbundler(self.ui, cb)
260 260 else:
261 261 return changegroup.getunbundler('01', cb, None)
262 262
263 263 def heads(self):
264 264 return self._repo.heads()
265 265
266 266 def known(self, nodes):
267 267 return self._repo.known(nodes)
268 268
269 269 def listkeys(self, namespace):
270 270 return self._repo.listkeys(namespace)
271 271
272 272 def lookup(self, key):
273 273 return self._repo.lookup(key)
274 274
275 275 def pushkey(self, namespace, key, old, new):
276 276 return self._repo.pushkey(namespace, key, old, new)
277 277
278 278 def stream_out(self):
279 279 raise error.Abort(_('cannot perform stream clone against local '
280 280 'peer'))
281 281
282 282 def unbundle(self, bundle, heads, url):
283 283 """apply a bundle on a repo
284 284
285 285 This function handles the repo locking itself."""
286 286 try:
287 287 try:
288 288 bundle = exchange.readbundle(self.ui, bundle, None)
289 289 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
290 290 if util.safehasattr(ret, 'getchunks'):
291 291 # This is a bundle20 object, turn it into an unbundler.
292 292 # This little dance should be dropped eventually when the
293 293 # API is finally improved.
294 294 stream = util.chunkbuffer(ret.getchunks())
295 295 ret = bundle2.getunbundler(self.ui, stream)
296 296 return ret
297 297 except Exception as exc:
298 298 # If the exception contains output salvaged from a bundle2
299 299 # reply, we need to make sure it is printed before continuing
300 300 # to fail. So we build a bundle2 with such output and consume
301 301 # it directly.
302 302 #
303 303 # This is not very elegant but allows a "simple" solution for
304 304 # issue4594
305 305 output = getattr(exc, '_bundle2salvagedoutput', ())
306 306 if output:
307 307 bundler = bundle2.bundle20(self._repo.ui)
308 308 for out in output:
309 309 bundler.addpart(out)
310 310 stream = util.chunkbuffer(bundler.getchunks())
311 311 b = bundle2.getunbundler(self.ui, stream)
312 312 bundle2.processbundle(self._repo, b)
313 313 raise
314 314 except error.PushRaced as exc:
315 315 raise error.ResponseError(_('push failed:'),
316 316 stringutil.forcebytestr(exc))
317 317
318 318 # End of _basewirecommands interface.
319 319
320 320 # Begin of peer interface.
321 321
322 322 def commandexecutor(self):
323 323 return localcommandexecutor(self)
324 324
325 325 # End of peer interface.
326 326
327 327 @zi.implementer(repository.ipeerlegacycommands)
328 328 class locallegacypeer(localpeer):
329 329 '''peer extension which implements legacy methods too; used for tests with
330 330 restricted capabilities'''
331 331
332 332 def __init__(self, repo):
333 333 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
334 334
335 335 # Begin of baselegacywirecommands interface.
336 336
337 337 def between(self, pairs):
338 338 return self._repo.between(pairs)
339 339
340 340 def branches(self, nodes):
341 341 return self._repo.branches(nodes)
342 342
343 343 def changegroup(self, nodes, source):
344 344 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
345 345 missingheads=self._repo.heads())
346 346 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
347 347
348 348 def changegroupsubset(self, bases, heads, source):
349 349 outgoing = discovery.outgoing(self._repo, missingroots=bases,
350 350 missingheads=heads)
351 351 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
352 352
353 353 # End of baselegacywirecommands interface.
354 354
355 355 # Increment the sub-version when the revlog v2 format changes to lock out old
356 356 # clients.
357 357 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
358 358
359 359 # Functions receiving (ui, features) that extensions can register to impact
360 360 # the ability to load repositories with custom requirements. Only
361 361 # functions defined in loaded extensions are called.
362 362 #
363 363 # The function receives a set of requirement strings that the repository
364 364 # is capable of opening. Functions will typically add elements to the
365 365 # set to reflect that the extension knows how to handle those requirements.
366 366 featuresetupfuncs = set()
367 367
368 368 @zi.implementer(repository.completelocalrepository)
369 369 class localrepository(object):
370 370
371 371 # obsolete experimental requirements:
372 372 # - manifestv2: An experimental new manifest format that allowed
373 373 # for stem compression of long paths. Experiment ended up not
374 374 # being successful (repository sizes went up due to worse delta
375 375 # chains), and the code was deleted in 4.6.
376 376 supportedformats = {
377 377 'revlogv1',
378 378 'generaldelta',
379 379 'treemanifest',
380 380 REVLOGV2_REQUIREMENT,
381 381 }
382 382 _basesupported = supportedformats | {
383 383 'store',
384 384 'fncache',
385 385 'shared',
386 386 'relshared',
387 387 'dotencode',
388 388 'exp-sparse',
389 389 }
390 390 openerreqs = {
391 391 'revlogv1',
392 392 'generaldelta',
393 393 'treemanifest',
394 394 }
395 395
396 396 # list of prefixes for files which can be written without 'wlock'
397 397 # Extensions should extend this list when needed
398 398 _wlockfreeprefix = {
399 399 # We might consider requiring 'wlock' for the next
400 400 # two, but pretty much all the existing code assumes
401 401 # wlock is not needed, so we keep them excluded for
402 402 # now.
403 403 'hgrc',
404 404 'requires',
405 405 # XXX cache is a complicated business; someone
406 406 # should investigate this in depth at some point
407 407 'cache/',
408 408 # XXX shouldn't dirstate be covered by the wlock?
409 409 'dirstate',
410 410 # XXX bisect was still a bit too messy at the time
411 411 # this changeset was introduced. Someone should fix
412 412 # the remaining bit and drop this line
413 413 'bisect.state',
414 414 }
415 415
416 def __init__(self, baseui, path, create=False):
416 def __init__(self, baseui, path, create=False, intents=None):
417 417 self.requirements = set()
418 418 self.filtername = None
419 419 # wvfs: rooted at the repository root, used to access the working copy
420 420 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
421 421 # vfs: rooted at .hg, used to access repo files outside of .hg/store
422 422 self.vfs = None
423 423 # svfs: usually rooted at .hg/store, used to access repository history
424 424 # If this is a shared repository, this vfs may point to another
425 425 # repository's .hg/store directory.
426 426 self.svfs = None
427 427 self.root = self.wvfs.base
428 428 self.path = self.wvfs.join(".hg")
429 429 self.origroot = path
430 430 # This is only used by context.workingctx.match in order to
431 431 # detect files in subrepos.
432 432 self.auditor = pathutil.pathauditor(
433 433 self.root, callback=self._checknested)
434 434 # This is only used by context.basectx.match in order to detect
435 435 # files in subrepos.
436 436 self.nofsauditor = pathutil.pathauditor(
437 437 self.root, callback=self._checknested, realfs=False, cached=True)
438 438 self.baseui = baseui
439 439 self.ui = baseui.copy()
440 440 self.ui.copy = baseui.copy # prevent copying repo configuration
441 441 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
442 442 if (self.ui.configbool('devel', 'all-warnings') or
443 443 self.ui.configbool('devel', 'check-locks')):
444 444 self.vfs.audit = self._getvfsward(self.vfs.audit)
445 445 # A list of callbacks to shape the phase if no data were found.
446 446 # Callbacks are in the form: func(repo, roots) --> processed root.
447 447 # This list is to be filled by extensions during repo setup
448 448 self._phasedefaults = []
449 449 try:
450 450 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
451 451 self._loadextensions()
452 452 except IOError:
453 453 pass
454 454
455 455 if featuresetupfuncs:
456 456 self.supported = set(self._basesupported) # use private copy
457 457 extmods = set(m.__name__ for n, m
458 458 in extensions.extensions(self.ui))
459 459 for setupfunc in featuresetupfuncs:
460 460 if setupfunc.__module__ in extmods:
461 461 setupfunc(self.ui, self.supported)
462 462 else:
463 463 self.supported = self._basesupported
464 464 color.setup(self.ui)
465 465
466 466 # Add compression engines.
467 467 for name in util.compengines:
468 468 engine = util.compengines[name]
469 469 if engine.revlogheader():
470 470 self.supported.add('exp-compression-%s' % name)
471 471
472 472 if not self.vfs.isdir():
473 473 if create:
474 474 self.requirements = newreporequirements(self)
475 475
476 476 if not self.wvfs.exists():
477 477 self.wvfs.makedirs()
478 478 self.vfs.makedir(notindexed=True)
479 479
480 480 if 'store' in self.requirements:
481 481 self.vfs.mkdir("store")
482 482
483 483 # create an invalid changelog
484 484 self.vfs.append(
485 485 "00changelog.i",
486 486 '\0\0\0\2' # represents revlogv2
487 487 ' dummy changelog to prevent using the old repo layout'
488 488 )
489 489 else:
490 490 raise error.RepoError(_("repository %s not found") % path)
491 491 elif create:
492 492 raise error.RepoError(_("repository %s already exists") % path)
493 493 else:
494 494 try:
495 495 self.requirements = scmutil.readrequires(
496 496 self.vfs, self.supported)
497 497 except IOError as inst:
498 498 if inst.errno != errno.ENOENT:
499 499 raise
500 500
501 501 cachepath = self.vfs.join('cache')
502 502 self.sharedpath = self.path
503 503 try:
504 504 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
505 505 if 'relshared' in self.requirements:
506 506 sharedpath = self.vfs.join(sharedpath)
507 507 vfs = vfsmod.vfs(sharedpath, realpath=True)
508 508 cachepath = vfs.join('cache')
509 509 s = vfs.base
510 510 if not vfs.exists():
511 511 raise error.RepoError(
512 512 _('.hg/sharedpath points to nonexistent directory %s') % s)
513 513 self.sharedpath = s
514 514 except IOError as inst:
515 515 if inst.errno != errno.ENOENT:
516 516 raise
517 517
518 518 if 'exp-sparse' in self.requirements and not sparse.enabled:
519 519 raise error.RepoError(_('repository is using sparse feature but '
520 520 'sparse is not enabled; enable the '
521 521 '"sparse" extensions to access'))
522 522
523 523 self.store = store.store(
524 524 self.requirements, self.sharedpath,
525 525 lambda base: vfsmod.vfs(base, cacheaudited=True))
526 526 self.spath = self.store.path
527 527 self.svfs = self.store.vfs
528 528 self.sjoin = self.store.join
529 529 self.vfs.createmode = self.store.createmode
530 530 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
531 531 self.cachevfs.createmode = self.store.createmode
532 532 if (self.ui.configbool('devel', 'all-warnings') or
533 533 self.ui.configbool('devel', 'check-locks')):
534 534 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
535 535 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
536 536 else: # standard vfs
537 537 self.svfs.audit = self._getsvfsward(self.svfs.audit)
538 538 self._applyopenerreqs()
539 539 if create:
540 540 self._writerequirements()
541 541
542 542 self._dirstatevalidatewarned = False
543 543
544 544 self._branchcaches = {}
545 545 self._revbranchcache = None
546 546 self._filterpats = {}
547 547 self._datafilters = {}
548 548 self._transref = self._lockref = self._wlockref = None
549 549
550 550 # A cache for various files under .hg/ that tracks file changes,
551 551 # (used by the filecache decorator)
552 552 #
553 553 # Maps a property name to its util.filecacheentry
554 554 self._filecache = {}
555 555
556 556 # hold sets of revision to be filtered
557 557 # should be cleared when something might have changed the filter value:
558 558 # - new changesets,
559 559 # - phase change,
560 560 # - new obsolescence marker,
561 561 # - working directory parent change,
562 562 # - bookmark changes
563 563 self.filteredrevcache = {}
564 564
565 565 # post-dirstate-status hooks
566 566 self._postdsstatus = []
567 567
568 568 # generic mapping between names and nodes
569 569 self.names = namespaces.namespaces()
570 570
571 571 # Key to signature value.
572 572 self._sparsesignaturecache = {}
573 573 # Signature to cached matcher instance.
574 574 self._sparsematchercache = {}
575 575
576 576 def _getvfsward(self, origfunc):
577 577 """build a ward for self.vfs"""
578 578 rref = weakref.ref(self)
579 579 def checkvfs(path, mode=None):
580 580 ret = origfunc(path, mode=mode)
581 581 repo = rref()
582 582 if (repo is None
583 583 or not util.safehasattr(repo, '_wlockref')
584 584 or not util.safehasattr(repo, '_lockref')):
585 585 return
586 586 if mode in (None, 'r', 'rb'):
587 587 return
588 588 if path.startswith(repo.path):
589 589 # truncate name relative to the repository (.hg)
590 590 path = path[len(repo.path) + 1:]
591 591 if path.startswith('cache/'):
592 592 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
593 593 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
594 594 if path.startswith('journal.'):
595 595 # journal is covered by 'lock'
596 596 if repo._currentlock(repo._lockref) is None:
597 597 repo.ui.develwarn('write with no lock: "%s"' % path,
598 598 stacklevel=2, config='check-locks')
599 599 elif repo._currentlock(repo._wlockref) is None:
600 600 # rest of vfs files are covered by 'wlock'
601 601 #
602 602 # exclude special files
603 603 for prefix in self._wlockfreeprefix:
604 604 if path.startswith(prefix):
605 605 return
606 606 repo.ui.develwarn('write with no wlock: "%s"' % path,
607 607 stacklevel=2, config='check-locks')
608 608 return ret
609 609 return checkvfs
610 610
611 611 def _getsvfsward(self, origfunc):
612 612 """build a ward for self.svfs"""
613 613 rref = weakref.ref(self)
614 614 def checksvfs(path, mode=None):
615 615 ret = origfunc(path, mode=mode)
616 616 repo = rref()
617 617 if repo is None or not util.safehasattr(repo, '_lockref'):
618 618 return
619 619 if mode in (None, 'r', 'rb'):
620 620 return
621 621 if path.startswith(repo.sharedpath):
622 622 # truncate name relative to the repository (.hg)
623 623 path = path[len(repo.sharedpath) + 1:]
624 624 if repo._currentlock(repo._lockref) is None:
625 625 repo.ui.develwarn('write with no lock: "%s"' % path,
626 626 stacklevel=3)
627 627 return ret
628 628 return checksvfs
629 629
630 630 def close(self):
631 631 self._writecaches()
632 632
633 633 def _loadextensions(self):
634 634 extensions.loadall(self.ui)
635 635
636 636 def _writecaches(self):
637 637 if self._revbranchcache:
638 638 self._revbranchcache.write()
639 639
640 640 def _restrictcapabilities(self, caps):
641 641 if self.ui.configbool('experimental', 'bundle2-advertise'):
642 642 caps = set(caps)
643 643 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
644 644 role='client'))
645 645 caps.add('bundle2=' + urlreq.quote(capsblob))
646 646 return caps
647 647
648 648 def _applyopenerreqs(self):
649 649 self.svfs.options = dict((r, 1) for r in self.requirements
650 650 if r in self.openerreqs)
651 651 # experimental config: format.chunkcachesize
652 652 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
653 653 if chunkcachesize is not None:
654 654 self.svfs.options['chunkcachesize'] = chunkcachesize
655 655 # experimental config: format.maxchainlen
656 656 maxchainlen = self.ui.configint('format', 'maxchainlen')
657 657 if maxchainlen is not None:
658 658 self.svfs.options['maxchainlen'] = maxchainlen
659 659 # experimental config: format.manifestcachesize
660 660 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
661 661 if manifestcachesize is not None:
662 662 self.svfs.options['manifestcachesize'] = manifestcachesize
663 663 # experimental config: format.aggressivemergedeltas
664 664 aggressivemergedeltas = self.ui.configbool('format',
665 665 'aggressivemergedeltas')
666 666 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
667 667 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
668 668 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
669 669 if 0 <= chainspan:
670 670 self.svfs.options['maxdeltachainspan'] = chainspan
671 671 mmapindexthreshold = self.ui.configbytes('experimental',
672 672 'mmapindexthreshold')
673 673 if mmapindexthreshold is not None:
674 674 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
675 675 withsparseread = self.ui.configbool('experimental', 'sparse-read')
676 676 srdensitythres = float(self.ui.config('experimental',
677 677 'sparse-read.density-threshold'))
678 678 srmingapsize = self.ui.configbytes('experimental',
679 679 'sparse-read.min-gap-size')
680 680 self.svfs.options['with-sparse-read'] = withsparseread
681 681 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
682 682 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
683 683
684 684 for r in self.requirements:
685 685 if r.startswith('exp-compression-'):
686 686 self.svfs.options['compengine'] = r[len('exp-compression-'):]
687 687
688 688 # TODO move "revlogv2" to openerreqs once finalized.
689 689 if REVLOGV2_REQUIREMENT in self.requirements:
690 690 self.svfs.options['revlogv2'] = True
691 691
692 692 def _writerequirements(self):
693 693 scmutil.writerequires(self.vfs, self.requirements)
694 694
695 695 def _checknested(self, path):
696 696 """Determine if path is a legal nested repository."""
697 697 if not path.startswith(self.root):
698 698 return False
699 699 subpath = path[len(self.root) + 1:]
700 700 normsubpath = util.pconvert(subpath)
701 701
702 702 # XXX: Checking against the current working copy is wrong in
703 703 # the sense that it can reject things like
704 704 #
705 705 # $ hg cat -r 10 sub/x.txt
706 706 #
707 707 # if sub/ is no longer a subrepository in the working copy
708 708 # parent revision.
709 709 #
710 710 # However, it can of course also allow things that would have
711 711 # been rejected before, such as the above cat command if sub/
712 712 # is a subrepository now, but was a normal directory before.
713 713 # The old path auditor would have rejected by mistake since it
714 714 # panics when it sees sub/.hg/.
715 715 #
716 716 # All in all, checking against the working copy seems sensible
717 717 # since we want to prevent access to nested repositories on
718 718 # the filesystem *now*.
719 719 ctx = self[None]
720 720 parts = util.splitpath(subpath)
721 721 while parts:
722 722 prefix = '/'.join(parts)
723 723 if prefix in ctx.substate:
724 724 if prefix == normsubpath:
725 725 return True
726 726 else:
727 727 sub = ctx.sub(prefix)
728 728 return sub.checknested(subpath[len(prefix) + 1:])
729 729 else:
730 730 parts.pop()
731 731 return False
732 732
733 733 def peer(self):
734 734 return localpeer(self) # not cached to avoid reference cycle
735 735
736 736 def unfiltered(self):
737 737 """Return unfiltered version of the repository
738 738
739 739 Intended to be overwritten by filtered repo."""
740 740 return self
741 741
742 742 def filtered(self, name, visibilityexceptions=None):
743 743 """Return a filtered version of a repository"""
744 744 cls = repoview.newtype(self.unfiltered().__class__)
745 745 return cls(self, name, visibilityexceptions)
746 746
747 747 @repofilecache('bookmarks', 'bookmarks.current')
748 748 def _bookmarks(self):
749 749 return bookmarks.bmstore(self)
750 750
751 751 @property
752 752 def _activebookmark(self):
753 753 return self._bookmarks.active
754 754
755 755 # _phasesets depend on changelog. what we need is to call
756 756 # _phasecache.invalidate() if '00changelog.i' was changed, but it
757 757 # can't be easily expressed in filecache mechanism.
758 758 @storecache('phaseroots', '00changelog.i')
759 759 def _phasecache(self):
760 760 return phases.phasecache(self, self._phasedefaults)
761 761
762 762 @storecache('obsstore')
763 763 def obsstore(self):
764 764 return obsolete.makestore(self.ui, self)
765 765
766 766 @storecache('00changelog.i')
767 767 def changelog(self):
768 768 return changelog.changelog(self.svfs,
769 769 trypending=txnutil.mayhavepending(self.root))
770 770
771 771 def _constructmanifest(self):
772 772 # This is a temporary function while we migrate from manifest to
773 773 # manifestlog. It allows bundlerepo and unionrepo to intercept the
774 774 # manifest creation.
775 775 return manifest.manifestrevlog(self.svfs)
776 776
777 777 @storecache('00manifest.i')
778 778 def manifestlog(self):
779 779 return manifest.manifestlog(self.svfs, self)
780 780
781 781 @repofilecache('dirstate')
782 782 def dirstate(self):
783 783 sparsematchfn = lambda: sparse.matcher(self)
784 784
785 785 return dirstate.dirstate(self.vfs, self.ui, self.root,
786 786 self._dirstatevalidate, sparsematchfn)
787 787
788 788 def _dirstatevalidate(self, node):
789 789 try:
790 790 self.changelog.rev(node)
791 791 return node
792 792 except error.LookupError:
793 793 if not self._dirstatevalidatewarned:
794 794 self._dirstatevalidatewarned = True
795 795 self.ui.warn(_("warning: ignoring unknown"
796 796 " working parent %s!\n") % short(node))
797 797 return nullid
798 798
799 799 @repofilecache(narrowspec.FILENAME)
800 800 def narrowpats(self):
801 801 """matcher patterns for this repository's narrowspec
802 802
803 803 A tuple of (includes, excludes).
804 804 """
805 805 source = self
806 806 if self.shared():
807 807 from . import hg
808 808 source = hg.sharedreposource(self)
809 809 return narrowspec.load(source)
810 810
811 811 @repofilecache(narrowspec.FILENAME)
812 812 def _narrowmatch(self):
813 813 if changegroup.NARROW_REQUIREMENT not in self.requirements:
814 814 return matchmod.always(self.root, '')
815 815 include, exclude = self.narrowpats
816 816 return narrowspec.match(self.root, include=include, exclude=exclude)
817 817
818 818 # TODO(martinvonz): make this property-like instead?
819 819 def narrowmatch(self):
820 820 return self._narrowmatch
821 821
822 822 def setnarrowpats(self, newincludes, newexcludes):
823 823 target = self
824 824 if self.shared():
825 825 from . import hg
826 826 target = hg.sharedreposource(self)
827 827 narrowspec.save(target, newincludes, newexcludes)
828 828 self.invalidate(clearfilecache=True)
829 829
830 830 def __getitem__(self, changeid):
831 831 if changeid is None:
832 832 return context.workingctx(self)
833 833 if isinstance(changeid, context.basectx):
834 834 return changeid
835 835 if isinstance(changeid, slice):
836 836 # wdirrev isn't contiguous so the slice shouldn't include it
837 837 return [context.changectx(self, i)
838 838 for i in xrange(*changeid.indices(len(self)))
839 839 if i not in self.changelog.filteredrevs]
840 840 try:
841 841 return context.changectx(self, changeid)
842 842 except error.WdirUnsupported:
843 843 return context.workingctx(self)
844 844
845 845 def __contains__(self, changeid):
846 846 """True if the given changeid exists
847 847
848 848 error.LookupError is raised if an ambiguous node is specified.
849 849 """
850 850 try:
851 851 self[changeid]
852 852 return True
853 853 except (error.RepoLookupError, error.FilteredIndexError,
854 854 error.FilteredLookupError):
855 855 return False
856 856
857 857 def __nonzero__(self):
858 858 return True
859 859
860 860 __bool__ = __nonzero__
861 861
862 862 def __len__(self):
863 863 # no need to pay the cost of repoview.changelog
864 864 unfi = self.unfiltered()
865 865 return len(unfi.changelog)
866 866
867 867 def __iter__(self):
868 868 return iter(self.changelog)
869 869
870 870 def revs(self, expr, *args):
871 871 '''Find revisions matching a revset.
872 872
873 873 The revset is specified as a string ``expr`` that may contain
874 874 %-formatting to escape certain types. See ``revsetlang.formatspec``.
875 875
876 876 Revset aliases from the configuration are not expanded. To expand
877 877 user aliases, consider calling ``scmutil.revrange()`` or
878 878 ``repo.anyrevs([expr], user=True)``.
879 879
880 880 Returns a revset.abstractsmartset, which is a list-like interface
881 881 that contains integer revisions.
882 882 '''
883 883 expr = revsetlang.formatspec(expr, *args)
884 884 m = revset.match(None, expr)
885 885 return m(self)
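# Usage sketch ('alice' and 'default' are illustrative arguments):
#
#   repo.revs('author(%s) and branch(%s)', 'alice', 'default')
#
# revsetlang.formatspec escapes each argument, guarding against
# revset injection from untrusted input.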
886 886
887 887 def set(self, expr, *args):
888 888 '''Find revisions matching a revset and emit changectx instances.
889 889
890 890 This is a convenience wrapper around ``revs()`` that iterates the
891 891 result and is a generator of changectx instances.
892 892
893 893 Revset aliases from the configuration are not expanded. To expand
894 894 user aliases, consider calling ``scmutil.revrange()``.
895 895 '''
896 896 for r in self.revs(expr, *args):
897 897 yield self[r]
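# Usage sketch ('README' is an illustrative file name):
#
#   for ctx in repo.set('modifies(%s)', 'README'):
#       ...  # ctx.hex(), ctx.description(), etc.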
898 898
899 899 def anyrevs(self, specs, user=False, localalias=None):
900 900 '''Find revisions matching one of the given revsets.
901 901
902 902 Revset aliases from the configuration are not expanded by default. To
903 903 expand user aliases, specify ``user=True``. To provide some local
904 904 definitions overriding user aliases, set ``localalias`` to
905 905 ``{name: definitionstring}``.
906 906 '''
907 907 if user:
908 908 m = revset.matchany(self.ui, specs,
909 909 lookup=revset.lookupfn(self),
910 910 localalias=localalias)
911 911 else:
912 912 m = revset.matchany(None, specs, localalias=localalias)
913 913 return m(self)
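# Usage sketch ('tagged' is an illustrative local alias):
#
#   repo.anyrevs(['tagged and draft()'], user=True,
#                localalias={'tagged': 'tag()'})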
914 914
915 915 def url(self):
916 916 return 'file:' + self.root
917 917
918 918 def hook(self, name, throw=False, **args):
919 919 """Call a hook, passing this repo instance.
920 920
921 921 This is a convenience method to aid invoking hooks. Extensions likely
922 922 won't call this unless they have registered a custom hook or are
923 923 replacing code that is expected to call a hook.
924 924 """
925 925 return hook.hook(self.ui, self, name, throw, **args)
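# Usage sketch, mirroring calls made later in this module:
#
#   repo.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
#
# For shell hooks, the keyword arguments are exposed as HG_* environment
# variables.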
926 926
927 927 @filteredpropertycache
928 928 def _tagscache(self):
929 929 '''Returns a tagscache object that contains various tags related
930 930 caches.'''
931 931
932 932 # This simplifies its cache management by having one decorated
933 933 # function (this one) and the rest simply fetch things from it.
934 934 class tagscache(object):
935 935 def __init__(self):
936 936 # These two define the set of tags for this repository. tags
937 937 # maps tag name to node; tagtypes maps tag name to 'global' or
938 938 # 'local'. (Global tags are defined by .hgtags across all
939 939 # heads, and local tags are defined in .hg/localtags.)
940 940 # They constitute the in-memory cache of tags.
941 941 self.tags = self.tagtypes = None
942 942
943 943 self.nodetagscache = self.tagslist = None
944 944
945 945 cache = tagscache()
946 946 cache.tags, cache.tagtypes = self._findtags()
947 947
948 948 return cache
949 949
950 950 def tags(self):
951 951 '''return a mapping of tag to node'''
952 952 t = {}
953 953 if self.changelog.filteredrevs:
954 954 tags, tt = self._findtags()
955 955 else:
956 956 tags = self._tagscache.tags
957 957 for k, v in tags.iteritems():
958 958 try:
959 959 # ignore tags to unknown nodes
960 960 self.changelog.rev(v)
961 961 t[k] = v
962 962 except (error.LookupError, ValueError):
963 963 pass
964 964 return t
965 965
966 966 def _findtags(self):
967 967 '''Do the hard work of finding tags. Return a pair of dicts
968 968 (tags, tagtypes) where tags maps tag name to node, and tagtypes
969 969 maps tag name to a string like \'global\' or \'local\'.
970 970 Subclasses or extensions are free to add their own tags, but
971 971 should be aware that the returned dicts will be retained for the
972 972 duration of the localrepo object.'''
973 973
974 974 # XXX what tagtype should subclasses/extensions use? Currently
975 975 # mq and bookmarks add tags, but do not set the tagtype at all.
976 976 # Should each extension invent its own tag type? Should there
977 977 # be one tagtype for all such "virtual" tags? Or is the status
978 978 # quo fine?
979 979
980 980
981 981 # map tag name to (node, hist)
982 982 alltags = tagsmod.findglobaltags(self.ui, self)
983 983 # map tag name to tag type
984 984 tagtypes = dict((tag, 'global') for tag in alltags)
985 985
986 986 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
987 987
988 988 # Build the return dicts. Have to re-encode tag names because
989 989 # the tags module always uses UTF-8 (in order not to lose info
990 990 # writing to the cache), but the rest of Mercurial wants them in
991 991 # local encoding.
992 992 tags = {}
993 993 for (name, (node, hist)) in alltags.iteritems():
994 994 if node != nullid:
995 995 tags[encoding.tolocal(name)] = node
996 996 tags['tip'] = self.changelog.tip()
997 997 tagtypes = dict([(encoding.tolocal(name), value)
998 998 for (name, value) in tagtypes.iteritems()])
999 999 return (tags, tagtypes)
1000 1000
1001 1001 def tagtype(self, tagname):
1002 1002 '''
1003 1003 return the type of the given tag. result can be:
1004 1004
1005 1005 'local' : a local tag
1006 1006 'global' : a global tag
1007 1007 None : tag does not exist
1008 1008 '''
1009 1009
1010 1010 return self._tagscache.tagtypes.get(tagname)
1011 1011
1012 1012 def tagslist(self):
1013 1013 '''return a list of tags ordered by revision'''
1014 1014 if not self._tagscache.tagslist:
1015 1015 l = []
1016 1016 for t, n in self.tags().iteritems():
1017 1017 l.append((self.changelog.rev(n), t, n))
1018 1018 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1019 1019
1020 1020 return self._tagscache.tagslist
1021 1021
1022 1022 def nodetags(self, node):
1023 1023 '''return the tags associated with a node'''
1024 1024 if not self._tagscache.nodetagscache:
1025 1025 nodetagscache = {}
1026 1026 for t, n in self._tagscache.tags.iteritems():
1027 1027 nodetagscache.setdefault(n, []).append(t)
1028 1028 for tags in nodetagscache.itervalues():
1029 1029 tags.sort()
1030 1030 self._tagscache.nodetagscache = nodetagscache
1031 1031 return self._tagscache.nodetagscache.get(node, [])
1032 1032
1033 1033 def nodebookmarks(self, node):
1034 1034 """return the list of bookmarks pointing to the specified node"""
1035 1035 marks = []
1036 1036 for bookmark, n in self._bookmarks.iteritems():
1037 1037 if n == node:
1038 1038 marks.append(bookmark)
1039 1039 return sorted(marks)
1040 1040
1041 1041 def branchmap(self):
1042 1042 '''returns a dictionary {branch: [branchheads]} with branchheads
1043 1043 ordered by increasing revision number'''
1044 1044 branchmap.updatecache(self)
1045 1045 return self._branchcaches[self.filtername]
1046 1046
1047 1047 @unfilteredmethod
1048 1048 def revbranchcache(self):
1049 1049 if not self._revbranchcache:
1050 1050 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1051 1051 return self._revbranchcache
1052 1052
1053 1053 def branchtip(self, branch, ignoremissing=False):
1054 1054 '''return the tip node for a given branch
1055 1055
1056 1056 If ignoremissing is True, then this method will not raise an error.
1057 1057 This is helpful for callers that only expect None for a missing branch
1058 1058 (e.g. namespace).
1059 1059
1060 1060 '''
1061 1061 try:
1062 1062 return self.branchmap().branchtip(branch)
1063 1063 except KeyError:
1064 1064 if not ignoremissing:
1065 1065 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1066 1066 else:
1067 1067 pass
1068 1068
1069 1069 def lookup(self, key):
1070 1070 return scmutil.revsymbol(self, key).node()
1071 1071
1072 1072 def lookupbranch(self, key):
1073 1073 if key in self.branchmap():
1074 1074 return key
1075 1075
1076 1076 return scmutil.revsymbol(self, key).branch()
1077 1077
1078 1078 def known(self, nodes):
1079 1079 cl = self.changelog
1080 1080 nm = cl.nodemap
1081 1081 filtered = cl.filteredrevs
1082 1082 result = []
1083 1083 for n in nodes:
1084 1084 r = nm.get(n)
1085 1085 resp = not (r is None or r in filtered)
1086 1086 result.append(resp)
1087 1087 return result
1088 1088
1089 1089 def local(self):
1090 1090 return self
1091 1091
1092 1092 def publishing(self):
1093 1093 # it's safe (and desirable) to trust the publish flag unconditionally
1094 1094 # so that we don't finalize changes shared between users via ssh or nfs
1095 1095 return self.ui.configbool('phases', 'publish', untrusted=True)
1096 1096
1097 1097 def cancopy(self):
1098 1098 # so statichttprepo's override of local() works
1099 1099 if not self.local():
1100 1100 return False
1101 1101 if not self.publishing():
1102 1102 return True
1103 1103 # if publishing we can't copy if there is filtered content
1104 1104 return not self.filtered('visible').changelog.filteredrevs
1105 1105
1106 1106 def shared(self):
1107 1107 '''the type of shared repository (None if not shared)'''
1108 1108 if self.sharedpath != self.path:
1109 1109 return 'store'
1110 1110 return None
1111 1111
1112 1112 def wjoin(self, f, *insidef):
1113 1113 return self.vfs.reljoin(self.root, f, *insidef)
1114 1114
1115 1115 def file(self, f):
1116 1116 if f[0] == '/':
1117 1117 f = f[1:]
1118 1118 return filelog.filelog(self.svfs, f)
1119 1119
1120 1120 def setparents(self, p1, p2=nullid):
1121 1121 with self.dirstate.parentchange():
1122 1122 copies = self.dirstate.setparents(p1, p2)
1123 1123 pctx = self[p1]
1124 1124 if copies:
1125 1125 # Adjust copy records: the dirstate cannot do it itself, as it
1126 1126 # requires access to the parents' manifests. Preserve them
1127 1127 # only for entries added to the first parent.
1128 1128 for f in copies:
1129 1129 if f not in pctx and copies[f] in pctx:
1130 1130 self.dirstate.copy(copies[f], f)
1131 1131 if p2 == nullid:
1132 1132 for f, s in sorted(self.dirstate.copies().items()):
1133 1133 if f not in pctx and s not in pctx:
1134 1134 self.dirstate.copy(None, f)
1135 1135
1136 1136 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1137 1137 """changeid can be a changeset revision, node, or tag.
1138 1138 fileid can be a file revision or node."""
1139 1139 return context.filectx(self, path, changeid, fileid,
1140 1140 changectx=changectx)
1141 1141
1142 1142 def getcwd(self):
1143 1143 return self.dirstate.getcwd()
1144 1144
1145 1145 def pathto(self, f, cwd=None):
1146 1146 return self.dirstate.pathto(f, cwd)
1147 1147
1148 1148 def _loadfilter(self, filter):
1149 1149 if filter not in self._filterpats:
1150 1150 l = []
1151 1151 for pat, cmd in self.ui.configitems(filter):
1152 1152 if cmd == '!':
1153 1153 continue
1154 1154 mf = matchmod.match(self.root, '', [pat])
1155 1155 fn = None
1156 1156 params = cmd
1157 1157 for name, filterfn in self._datafilters.iteritems():
1158 1158 if cmd.startswith(name):
1159 1159 fn = filterfn
1160 1160 params = cmd[len(name):].lstrip()
1161 1161 break
1162 1162 if not fn:
1163 1163 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1164 1164 # Wrap old filters not supporting keyword arguments
1165 1165 if not pycompat.getargspec(fn)[2]:
1166 1166 oldfn = fn
1167 1167 fn = lambda s, c, **kwargs: oldfn(s, c)
1168 1168 l.append((mf, fn, params))
1169 1169 self._filterpats[filter] = l
1170 1170 return self._filterpats[filter]
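# Illustrative hgrc snippet consumed above; the section name is the
# ``filter`` argument ('encode' or 'decode'), patterns map to commands,
# and ``mycommand`` is hypothetical:
#
#   [encode]
#   **.dat = mycommand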
1171 1171
1172 1172 def _filter(self, filterpats, filename, data):
1173 1173 for mf, fn, cmd in filterpats:
1174 1174 if mf(filename):
1175 1175 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1176 1176 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1177 1177 break
1178 1178
1179 1179 return data
1180 1180
1181 1181 @unfilteredpropertycache
1182 1182 def _encodefilterpats(self):
1183 1183 return self._loadfilter('encode')
1184 1184
1185 1185 @unfilteredpropertycache
1186 1186 def _decodefilterpats(self):
1187 1187 return self._loadfilter('decode')
1188 1188
1189 1189 def adddatafilter(self, name, filter):
1190 1190 self._datafilters[name] = filter
1191 1191
1192 1192 def wread(self, filename):
1193 1193 if self.wvfs.islink(filename):
1194 1194 data = self.wvfs.readlink(filename)
1195 1195 else:
1196 1196 data = self.wvfs.read(filename)
1197 1197 return self._filter(self._encodefilterpats, filename, data)
1198 1198
1199 1199 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1200 1200 """write ``data`` into ``filename`` in the working directory
1201 1201
1202 1202 This returns the length of the written (possibly decoded) data.
1203 1203 """
1204 1204 data = self._filter(self._decodefilterpats, filename, data)
1205 1205 if 'l' in flags:
1206 1206 self.wvfs.symlink(data, filename)
1207 1207 else:
1208 1208 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1209 1209 **kwargs)
1210 1210 if 'x' in flags:
1211 1211 self.wvfs.setflags(filename, False, True)
1212 1212 else:
1213 1213 self.wvfs.setflags(filename, False, False)
1214 1214 return len(data)
1215 1215
1216 1216 def wwritedata(self, filename, data):
1217 1217 return self._filter(self._decodefilterpats, filename, data)
1218 1218
1219 1219 def currenttransaction(self):
1220 1220 """return the current transaction or None if non exists"""
1221 1221 if self._transref:
1222 1222 tr = self._transref()
1223 1223 else:
1224 1224 tr = None
1225 1225
1226 1226 if tr and tr.running():
1227 1227 return tr
1228 1228 return None
1229 1229
1230 1230 def transaction(self, desc, report=None):
1231 1231 if (self.ui.configbool('devel', 'all-warnings')
1232 1232 or self.ui.configbool('devel', 'check-locks')):
1233 1233 if self._currentlock(self._lockref) is None:
1234 1234 raise error.ProgrammingError('transaction requires locking')
1235 1235 tr = self.currenttransaction()
1236 1236 if tr is not None:
1237 1237 return tr.nest(name=desc)
1238 1238
1239 1239 # abort here if the journal already exists
1240 1240 if self.svfs.exists("journal"):
1241 1241 raise error.RepoError(
1242 1242 _("abandoned transaction found"),
1243 1243 hint=_("run 'hg recover' to clean up transaction"))
1244 1244
1245 1245 idbase = "%.40f#%f" % (random.random(), time.time())
1246 1246 ha = hex(hashlib.sha1(idbase).digest())
1247 1247 txnid = 'TXN:' + ha
1248 1248 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1249 1249
1250 1250 self._writejournal(desc)
1251 1251 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1252 1252 if report:
1253 1253 rp = report
1254 1254 else:
1255 1255 rp = self.ui.warn
1256 1256 vfsmap = {'plain': self.vfs} # root of .hg/
1257 1257 # we must avoid cyclic reference between repo and transaction.
1258 1258 reporef = weakref.ref(self)
1259 1259 # Code to track tag movement
1260 1260 #
1261 1261 # Since tags are all handled as file content, it is actually quite hard
1262 1262 # to track these movements from a code perspective. So we fall back to
1263 1263 # tracking at the repository level. One could envision tracking changes
1264 1264 # to the '.hgtags' file through changegroup application, but that fails
1265 1265 # to cope with cases where a transaction exposes new heads without a
1266 1266 # changegroup being involved (e.g. phase movement).
1267 1267 #
1268 1268 # For now, we gate the feature behind a flag since it likely comes
1269 1269 # with performance impacts. The current code runs more often than needed
1270 1270 # and does not use caches as much as it could. The current focus is on
1271 1271 # the behavior of the feature so we disable it by default. The flag
1272 1272 # will be removed when we are happy with the performance impact.
1273 1273 #
1274 1274 # Once this feature is no longer experimental move the following
1275 1275 # documentation to the appropriate help section:
1276 1276 #
1277 1277 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1278 1278 # tags (new or changed or deleted tags). In addition the details of
1279 1279 # these changes are made available in a file at:
1280 1280 # ``REPOROOT/.hg/changes/tags.changes``.
1281 1281 # Make sure you check for HG_TAG_MOVED before reading that file as it
1282 1282 # might exist from a previous transaction even if no tags were touched
1283 1283 # in this one. Changes are recorded in a line-based format::
1284 1284 #
1285 1285 # <action> <hex-node> <tag-name>\n
1286 1286 #
1287 1287 # Actions are defined as follows:
1288 1288 # "-R": tag is removed,
1289 1289 # "+A": tag is added,
1290 1290 # "-M": tag is moved (old value),
1291 1291 # "+M": tag is moved (new value),
1292 1292 tracktags = lambda x: None
1293 1293 # experimental config: experimental.hook-track-tags
1294 1294 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1295 1295 if desc != 'strip' and shouldtracktags:
1296 1296 oldheads = self.changelog.headrevs()
1297 1297 def tracktags(tr2):
1298 1298 repo = reporef()
1299 1299 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1300 1300 newheads = repo.changelog.headrevs()
1301 1301 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1302 1302 # note: we compare lists here;
1303 1303 # as we do it only once, building a set would not be cheaper
1304 1304 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1305 1305 if changes:
1306 1306 tr2.hookargs['tag_moved'] = '1'
1307 1307 with repo.vfs('changes/tags.changes', 'w',
1308 1308 atomictemp=True) as changesfile:
1309 1309 # note: we do not register the file with the transaction
1310 1310 # because we need it to still exist when the transaction
1311 1311 # is closed (for txnclose hooks)
1312 1312 tagsmod.writediff(changesfile, changes)
1313 1313 def validate(tr2):
1314 1314 """will run pre-closing hooks"""
1315 1315 # XXX the transaction API is a bit lacking here so we take a hacky
1316 1316 # path for now
1317 1317 #
1318 1318 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1319 1319 # dict is copied before these run. In addition we need the data
1320 1320 # available to in-memory hooks too.
1321 1321 #
1322 1322 # Moreover, we also need to make sure this runs before txnclose
1323 1323 # hooks and there is no "pending" mechanism that would execute
1324 1324 # logic only if hooks are about to run.
1325 1325 #
1326 1326 # Fixing this limitation of the transaction is also needed to track
1327 1327 # other families of changes (bookmarks, phases, obsolescence).
1328 1328 #
1329 1329 # This will have to be fixed before we remove the experimental
1330 1330 # gating.
1331 1331 tracktags(tr2)
1332 1332 repo = reporef()
1333 1333 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1334 1334 scmutil.enforcesinglehead(repo, tr2, desc)
1335 1335 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1336 1336 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1337 1337 args = tr.hookargs.copy()
1338 1338 args.update(bookmarks.preparehookargs(name, old, new))
1339 1339 repo.hook('pretxnclose-bookmark', throw=True,
1340 1340 txnname=desc,
1341 1341 **pycompat.strkwargs(args))
1342 1342 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1343 1343 cl = repo.unfiltered().changelog
1344 1344 for rev, (old, new) in tr.changes['phases'].items():
1345 1345 args = tr.hookargs.copy()
1346 1346 node = hex(cl.node(rev))
1347 1347 args.update(phases.preparehookargs(node, old, new))
1348 1348 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1349 1349 **pycompat.strkwargs(args))
1350 1350
1351 1351 repo.hook('pretxnclose', throw=True,
1352 1352 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1353 1353 def releasefn(tr, success):
1354 1354 repo = reporef()
1355 1355 if success:
1356 1356 # this should be explicitly invoked here, because
1357 1357 # in-memory changes aren't written out when the
1358 1358 # transaction closes if tr.addfilegenerator (via
1359 1359 # dirstate.write or so) wasn't invoked while the
1360 1360 # transaction was running
1361 1361 repo.dirstate.write(None)
1362 1362 else:
1363 1363 # discard all changes (including ones already written
1364 1364 # out) in this transaction
1365 1365 repo.dirstate.restorebackup(None, 'journal.dirstate')
1366 1366
1367 1367 repo.invalidate(clearfilecache=True)
1368 1368
1369 1369 tr = transaction.transaction(rp, self.svfs, vfsmap,
1370 1370 "journal",
1371 1371 "undo",
1372 1372 aftertrans(renames),
1373 1373 self.store.createmode,
1374 1374 validator=validate,
1375 1375 releasefn=releasefn,
1376 1376 checkambigfiles=_cachedfiles,
1377 1377 name=desc)
1378 1378 tr.changes['revs'] = xrange(0, 0)
1379 1379 tr.changes['obsmarkers'] = set()
1380 1380 tr.changes['phases'] = {}
1381 1381 tr.changes['bookmarks'] = {}
1382 1382
1383 1383 tr.hookargs['txnid'] = txnid
1384 1384 # note: writing the fncache only during finalize means that the file is
1385 1385 # outdated when running hooks. As fncache is used for streaming clones,
1386 1386 # this is not expected to break anything that happens during the hooks.
1387 1387 tr.addfinalize('flush-fncache', self.store.write)
1388 1388 def txnclosehook(tr2):
1389 1389 """To be run if transaction is successful, will schedule a hook run
1390 1390 """
1391 1391 # Don't reference tr2 in hook() so we don't hold a reference.
1392 1392 # This reduces memory consumption when there are multiple
1393 1393 # transactions per lock. This can likely go away if issue5045
1394 1394 # fixes the function accumulation.
1395 1395 hookargs = tr2.hookargs
1396 1396
1397 1397 def hookfunc():
1398 1398 repo = reporef()
1399 1399 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1400 1400 bmchanges = sorted(tr.changes['bookmarks'].items())
1401 1401 for name, (old, new) in bmchanges:
1402 1402 args = tr.hookargs.copy()
1403 1403 args.update(bookmarks.preparehookargs(name, old, new))
1404 1404 repo.hook('txnclose-bookmark', throw=False,
1405 1405 txnname=desc, **pycompat.strkwargs(args))
1406 1406
1407 1407 if hook.hashook(repo.ui, 'txnclose-phase'):
1408 1408 cl = repo.unfiltered().changelog
1409 1409 phasemv = sorted(tr.changes['phases'].items())
1410 1410 for rev, (old, new) in phasemv:
1411 1411 args = tr.hookargs.copy()
1412 1412 node = hex(cl.node(rev))
1413 1413 args.update(phases.preparehookargs(node, old, new))
1414 1414 repo.hook('txnclose-phase', throw=False, txnname=desc,
1415 1415 **pycompat.strkwargs(args))
1416 1416
1417 1417 repo.hook('txnclose', throw=False, txnname=desc,
1418 1418 **pycompat.strkwargs(hookargs))
1419 1419 reporef()._afterlock(hookfunc)
1420 1420 tr.addfinalize('txnclose-hook', txnclosehook)
1421 1421 # Include a leading "-" to make it happen before the transaction summary
1422 1422 # reports registered via scmutil.registersummarycallback() whose names
1423 1423 # are 00-txnreport etc. That way, the caches will be warm when the
1424 1424 # callbacks run.
1425 1425 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1426 1426 def txnaborthook(tr2):
1427 1427 """To be run if transaction is aborted
1428 1428 """
1429 1429 reporef().hook('txnabort', throw=False, txnname=desc,
1430 1430 **pycompat.strkwargs(tr2.hookargs))
1431 1431 tr.addabort('txnabort-hook', txnaborthook)
1432 1432 # avoid eager cache invalidation. in-memory data should be identical
1433 1433 # to stored data if transaction has no error.
1434 1434 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1435 1435 self._transref = weakref.ref(tr)
1436 1436 scmutil.registersummarycallback(self, tr, desc)
1437 1437 return tr
1438 1438
1439 1439 def _journalfiles(self):
1440 1440 return ((self.svfs, 'journal'),
1441 1441 (self.vfs, 'journal.dirstate'),
1442 1442 (self.vfs, 'journal.branch'),
1443 1443 (self.vfs, 'journal.desc'),
1444 1444 (self.vfs, 'journal.bookmarks'),
1445 1445 (self.svfs, 'journal.phaseroots'))
1446 1446
1447 1447 def undofiles(self):
1448 1448 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1449 1449
1450 1450 @unfilteredmethod
1451 1451 def _writejournal(self, desc):
1452 1452 self.dirstate.savebackup(None, 'journal.dirstate')
1453 1453 self.vfs.write("journal.branch",
1454 1454 encoding.fromlocal(self.dirstate.branch()))
1455 1455 self.vfs.write("journal.desc",
1456 1456 "%d\n%s\n" % (len(self), desc))
1457 1457 self.vfs.write("journal.bookmarks",
1458 1458 self.vfs.tryread("bookmarks"))
1459 1459 self.svfs.write("journal.phaseroots",
1460 1460 self.svfs.tryread("phaseroots"))
1461 1461
1462 1462 def recover(self):
1463 1463 with self.lock():
1464 1464 if self.svfs.exists("journal"):
1465 1465 self.ui.status(_("rolling back interrupted transaction\n"))
1466 1466 vfsmap = {'': self.svfs,
1467 1467 'plain': self.vfs,}
1468 1468 transaction.rollback(self.svfs, vfsmap, "journal",
1469 1469 self.ui.warn,
1470 1470 checkambigfiles=_cachedfiles)
1471 1471 self.invalidate()
1472 1472 return True
1473 1473 else:
1474 1474 self.ui.warn(_("no interrupted transaction available\n"))
1475 1475 return False
1476 1476
1477 1477 def rollback(self, dryrun=False, force=False):
1478 1478 wlock = lock = dsguard = None
1479 1479 try:
1480 1480 wlock = self.wlock()
1481 1481 lock = self.lock()
1482 1482 if self.svfs.exists("undo"):
1483 1483 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1484 1484
1485 1485 return self._rollback(dryrun, force, dsguard)
1486 1486 else:
1487 1487 self.ui.warn(_("no rollback information available\n"))
1488 1488 return 1
1489 1489 finally:
1490 1490 release(dsguard, lock, wlock)
1491 1491
1492 1492 @unfilteredmethod # Until we get smarter cache management
1493 1493 def _rollback(self, dryrun, force, dsguard):
1494 1494 ui = self.ui
1495 1495 try:
1496 1496 args = self.vfs.read('undo.desc').splitlines()
1497 1497 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1498 1498 if len(args) >= 3:
1499 1499 detail = args[2]
1500 1500 oldtip = oldlen - 1
1501 1501
1502 1502 if detail and ui.verbose:
1503 1503 msg = (_('repository tip rolled back to revision %d'
1504 1504 ' (undo %s: %s)\n')
1505 1505 % (oldtip, desc, detail))
1506 1506 else:
1507 1507 msg = (_('repository tip rolled back to revision %d'
1508 1508 ' (undo %s)\n')
1509 1509 % (oldtip, desc))
1510 1510 except IOError:
1511 1511 msg = _('rolling back unknown transaction\n')
1512 1512 desc = None
1513 1513
1514 1514 if not force and self['.'] != self['tip'] and desc == 'commit':
1515 1515 raise error.Abort(
1516 1516 _('rollback of last commit while not checked out '
1517 1517 'may lose data'), hint=_('use -f to force'))
1518 1518
1519 1519 ui.status(msg)
1520 1520 if dryrun:
1521 1521 return 0
1522 1522
1523 1523 parents = self.dirstate.parents()
1524 1524 self.destroying()
1525 1525 vfsmap = {'plain': self.vfs, '': self.svfs}
1526 1526 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1527 1527 checkambigfiles=_cachedfiles)
1528 1528 if self.vfs.exists('undo.bookmarks'):
1529 1529 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1530 1530 if self.svfs.exists('undo.phaseroots'):
1531 1531 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1532 1532 self.invalidate()
1533 1533
1534 1534 parentgone = (parents[0] not in self.changelog.nodemap or
1535 1535 parents[1] not in self.changelog.nodemap)
1536 1536 if parentgone:
1537 1537 # prevent dirstateguard from overwriting already restored one
1538 1538 dsguard.close()
1539 1539
1540 1540 self.dirstate.restorebackup(None, 'undo.dirstate')
1541 1541 try:
1542 1542 branch = self.vfs.read('undo.branch')
1543 1543 self.dirstate.setbranch(encoding.tolocal(branch))
1544 1544 except IOError:
1545 1545 ui.warn(_('named branch could not be reset: '
1546 1546 'current branch is still \'%s\'\n')
1547 1547 % self.dirstate.branch())
1548 1548
1549 1549 parents = tuple([p.rev() for p in self[None].parents()])
1550 1550 if len(parents) > 1:
1551 1551 ui.status(_('working directory now based on '
1552 1552 'revisions %d and %d\n') % parents)
1553 1553 else:
1554 1554 ui.status(_('working directory now based on '
1555 1555 'revision %d\n') % parents)
1556 1556 mergemod.mergestate.clean(self, self['.'].node())
1557 1557
1558 1558 # TODO: if we know which new heads may result from this rollback, pass
1559 1559 # them to destroy(), which will prevent the branchhead cache from being
1560 1560 # invalidated.
1561 1561 self.destroyed()
1562 1562 return 0
1563 1563
1564 1564 def _buildcacheupdater(self, newtransaction):
1565 1565 """called during transaction to build the callback updating cache
1566 1566
1567 1567 Lives on the repository to help extensions that might want to augment
1568 1568 this logic. For this purpose, the created transaction is passed to the
1569 1569 method.
1570 1570 """
1571 1571 # we must avoid cyclic reference between repo and transaction.
1572 1572 reporef = weakref.ref(self)
1573 1573 def updater(tr):
1574 1574 repo = reporef()
1575 1575 repo.updatecaches(tr)
1576 1576 return updater
1577 1577
1578 1578 @unfilteredmethod
1579 1579 def updatecaches(self, tr=None, full=False):
1580 1580 """warm appropriate caches
1581 1581
1582 1582 If this function is called after a transaction has closed, the transaction
1583 1583 will be available in the 'tr' argument. This can be used to selectively
1584 1584 update caches relevant to the changes in that transaction.
1585 1585
1586 1586 If 'full' is set, make sure all caches the function knows about have
1587 1587 up-to-date data, even the ones usually loaded more lazily.
1588 1588 """
1589 1589 if tr is not None and tr.hookargs.get('source') == 'strip':
1590 1590 # During strip, many caches are invalid but
1591 1591 # later call to `destroyed` will refresh them.
1592 1592 return
1593 1593
1594 1594 if tr is None or tr.changes['revs']:
1595 1595 # updating the unfiltered branchmap should refresh all the others,
1596 1596 self.ui.debug('updating the branch cache\n')
1597 1597 branchmap.updatecache(self.filtered('served'))
1598 1598
1599 1599 if full:
1600 1600 rbc = self.revbranchcache()
1601 1601 for r in self.changelog:
1602 1602 rbc.branchinfo(r)
1603 1603 rbc.write()
1604 1604
1605 1605 def invalidatecaches(self):
1606 1606
1607 1607 if '_tagscache' in vars(self):
1608 1608 # can't use delattr on proxy
1609 1609 del self.__dict__['_tagscache']
1610 1610
1611 1611 self.unfiltered()._branchcaches.clear()
1612 1612 self.invalidatevolatilesets()
1613 1613 self._sparsesignaturecache.clear()
1614 1614
1615 1615 def invalidatevolatilesets(self):
1616 1616 self.filteredrevcache.clear()
1617 1617 obsolete.clearobscaches(self)
1618 1618
1619 1619 def invalidatedirstate(self):
1620 1620 '''Invalidates the dirstate, causing the next call to dirstate
1621 1621 to check if it was modified since the last time it was read,
1622 1622 rereading it if it has.
1623 1623
1624 1624 This is different from dirstate.invalidate() in that it doesn't always
1625 1625 reread the dirstate. Use dirstate.invalidate() if you want to
1626 1626 explicitly read the dirstate again (i.e. restoring it to a previous
1627 1627 known good state).'''
1628 1628 if hasunfilteredcache(self, 'dirstate'):
1629 1629 for k in self.dirstate._filecache:
1630 1630 try:
1631 1631 delattr(self.dirstate, k)
1632 1632 except AttributeError:
1633 1633 pass
1634 1634 delattr(self.unfiltered(), 'dirstate')
1635 1635
1636 1636 def invalidate(self, clearfilecache=False):
1637 1637 '''Invalidates both store and non-store parts other than dirstate
1638 1638
1639 1639 If a transaction is running, invalidation of store is omitted,
1640 1640 because discarding in-memory changes might cause inconsistency
1641 1641 (e.g. incomplete fncache causes unintentional failure, but
1642 1642 redundant one doesn't).
1643 1643 '''
1644 1644 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1645 1645 for k in list(self._filecache.keys()):
1646 1646 # dirstate is invalidated separately in invalidatedirstate()
1647 1647 if k == 'dirstate':
1648 1648 continue
1649 1649 if (k == 'changelog' and
1650 1650 self.currenttransaction() and
1651 1651 self.changelog._delayed):
1652 1652 # The changelog object may store unwritten revisions. We don't
1653 1653 # want to lose them.
1654 1654 # TODO: Solve the problem instead of working around it.
1655 1655 continue
1656 1656
1657 1657 if clearfilecache:
1658 1658 del self._filecache[k]
1659 1659 try:
1660 1660 delattr(unfiltered, k)
1661 1661 except AttributeError:
1662 1662 pass
1663 1663 self.invalidatecaches()
1664 1664 if not self.currenttransaction():
1665 1665 # TODO: Changing contents of store outside transaction
1666 1666 # causes inconsistency. We should make in-memory store
1667 1667 # changes detectable, and abort if changed.
1668 1668 self.store.invalidatecaches()
1669 1669
1670 1670 def invalidateall(self):
1671 1671 '''Fully invalidates both store and non-store parts, causing the
1672 1672 subsequent operation to reread any outside changes.'''
1673 1673 # extension should hook this to invalidate its caches
1674 1674 self.invalidate()
1675 1675 self.invalidatedirstate()
1676 1676
1677 1677 @unfilteredmethod
1678 1678 def _refreshfilecachestats(self, tr):
1679 1679 """Reload stats of cached files so that they are flagged as valid"""
1680 1680 for k, ce in self._filecache.items():
1681 1681 k = pycompat.sysstr(k)
1682 1682 if k == r'dirstate' or k not in self.__dict__:
1683 1683 continue
1684 1684 ce.refresh()
1685 1685
1686 1686 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1687 1687 inheritchecker=None, parentenvvar=None):
1688 1688 parentlock = None
1689 1689 # the contents of parentenvvar are used by the underlying lock to
1690 1690 # determine whether it can be inherited
1691 1691 if parentenvvar is not None:
1692 1692 parentlock = encoding.environ.get(parentenvvar)
1693 1693
1694 1694 timeout = 0
1695 1695 warntimeout = 0
1696 1696 if wait:
1697 1697 timeout = self.ui.configint("ui", "timeout")
1698 1698 warntimeout = self.ui.configint("ui", "timeout.warn")
1699 1699
1700 1700 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1701 1701 releasefn=releasefn,
1702 1702 acquirefn=acquirefn, desc=desc,
1703 1703 inheritchecker=inheritchecker,
1704 1704 parentlock=parentlock)
1705 1705 return l
1706 1706
1707 1707 def _afterlock(self, callback):
1708 1708 """add a callback to be run when the repository is fully unlocked
1709 1709
1710 1710 The callback will be executed when the outermost lock is released
1711 1711 (with wlock being higher level than 'lock')."""
1712 1712 for ref in (self._wlockref, self._lockref):
1713 1713 l = ref and ref()
1714 1714 if l and l.held:
1715 1715 l.postrelease.append(callback)
1716 1716 break
1717 1717 else: # no lock has been found.
1718 1718 callback()
1719 1719
1720 1720 def lock(self, wait=True):
1721 1721 '''Lock the repository store (.hg/store) and return a weak reference
1722 1722 to the lock. Use this before modifying the store (e.g. committing or
1723 1723 stripping). If you are opening a transaction, get a lock as well.
1724 1724
1725 1725 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1726 1726 'wlock' first to avoid a dead-lock hazard.'''
1727 1727 l = self._currentlock(self._lockref)
1728 1728 if l is not None:
1729 1729 l.lock()
1730 1730 return l
1731 1731
1732 1732 l = self._lock(self.svfs, "lock", wait, None,
1733 1733 self.invalidate, _('repository %s') % self.origroot)
1734 1734 self._lockref = weakref.ref(l)
1735 1735 return l
1736 1736
1737 1737 def _wlockchecktransaction(self):
1738 1738 if self.currenttransaction() is not None:
1739 1739 raise error.LockInheritanceContractViolation(
1740 1740 'wlock cannot be inherited in the middle of a transaction')
1741 1741
1742 1742 def wlock(self, wait=True):
1743 1743 '''Lock the non-store parts of the repository (everything under
1744 1744 .hg except .hg/store) and return a weak reference to the lock.
1745 1745
1746 1746 Use this before modifying files in .hg.
1747 1747
1748 1748 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1749 1749 'wlock' first to avoid a dead-lock hazard.'''
1750 1750 l = self._wlockref and self._wlockref()
1751 1751 if l is not None and l.held:
1752 1752 l.lock()
1753 1753 return l
1754 1754
1755 1755 # We do not need to check for non-waiting lock acquisition. Such
1756 1756 # acquisition would not cause a dead-lock, as it would just fail.
1757 1757 if wait and (self.ui.configbool('devel', 'all-warnings')
1758 1758 or self.ui.configbool('devel', 'check-locks')):
1759 1759 if self._currentlock(self._lockref) is not None:
1760 1760 self.ui.develwarn('"wlock" acquired after "lock"')
1761 1761
1762 1762 def unlock():
1763 1763 if self.dirstate.pendingparentchange():
1764 1764 self.dirstate.invalidate()
1765 1765 else:
1766 1766 self.dirstate.write(None)
1767 1767
1768 1768 self._filecache['dirstate'].refresh()
1769 1769
1770 1770 l = self._lock(self.vfs, "wlock", wait, unlock,
1771 1771 self.invalidatedirstate, _('working directory of %s') %
1772 1772 self.origroot,
1773 1773 inheritchecker=self._wlockchecktransaction,
1774 1774 parentenvvar='HG_WLOCK_LOCKER')
1775 1775 self._wlockref = weakref.ref(l)
1776 1776 return l
1777 1777
1778 1778 def _currentlock(self, lockref):
1779 1779 """Returns the lock if it's held, or None if it's not."""
1780 1780 if lockref is None:
1781 1781 return None
1782 1782 l = lockref()
1783 1783 if l is None or not l.held:
1784 1784 return None
1785 1785 return l
1786 1786
1787 1787 def currentwlock(self):
1788 1788 """Returns the wlock if it's held, or None if it's not."""
1789 1789 return self._currentlock(self._wlockref)
1790 1790
1791 1791 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1792 1792 """
1793 1793 commit an individual file as part of a larger transaction
1794 1794 """
1795 1795
1796 1796 fname = fctx.path()
1797 1797 fparent1 = manifest1.get(fname, nullid)
1798 1798 fparent2 = manifest2.get(fname, nullid)
1799 1799 if isinstance(fctx, context.filectx):
1800 1800 node = fctx.filenode()
1801 1801 if node in [fparent1, fparent2]:
1802 1802 self.ui.debug('reusing %s filelog entry\n' % fname)
1803 1803 if manifest1.flags(fname) != fctx.flags():
1804 1804 changelist.append(fname)
1805 1805 return node
1806 1806
1807 1807 flog = self.file(fname)
1808 1808 meta = {}
1809 1809 copy = fctx.renamed()
1810 1810 if copy and copy[0] != fname:
1811 1811 # Mark the new revision of this file as a copy of another
1812 1812 # file. This copy data will effectively act as a parent
1813 1813 # of this new revision. If this is a merge, the first
1814 1814 # parent will be the nullid (meaning "look up the copy data")
1815 1815 # and the second one will be the other parent. For example:
1816 1816 #
1817 1817 # 0 --- 1 --- 3 rev1 changes file foo
1818 1818 # \ / rev2 renames foo to bar and changes it
1819 1819 # \- 2 -/ rev3 should have bar with all changes and
1820 1820 # should record that bar descends from
1821 1821 # bar in rev2 and foo in rev1
1822 1822 #
1823 1823 # this allows this merge to succeed:
1824 1824 #
1825 1825 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1826 1826 # \ / merging rev3 and rev4 should use bar@rev2
1827 1827 # \- 2 --- 4 as the merge base
1828 1828 #
1829 1829
1830 1830 cfname = copy[0]
1831 1831 crev = manifest1.get(cfname)
1832 1832 newfparent = fparent2
1833 1833
1834 1834 if manifest2: # branch merge
1835 1835 if fparent2 == nullid or crev is None: # copied on remote side
1836 1836 if cfname in manifest2:
1837 1837 crev = manifest2[cfname]
1838 1838 newfparent = fparent1
1839 1839
1840 1840 # Here, we used to search backwards through history to try to find
1841 1841 # where the file copy came from if the source of a copy was not in
1842 1842 # the parent directory. However, this doesn't actually make sense to
1843 1843 # do (what does a copy from something not in your working copy even
1844 1844 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1845 1845 # the user that copy information was dropped, so if they didn't
1846 1846 # expect this outcome it can be fixed, but this is the correct
1847 1847 # behavior in this circumstance.
1848 1848
1849 1849 if crev:
1850 1850 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1851 1851 meta["copy"] = cfname
1852 1852 meta["copyrev"] = hex(crev)
1853 1853 fparent1, fparent2 = nullid, newfparent
1854 1854 else:
1855 1855 self.ui.warn(_("warning: can't find ancestor for '%s' "
1856 1856 "copied from '%s'!\n") % (fname, cfname))
1857 1857
1858 1858 elif fparent1 == nullid:
1859 1859 fparent1, fparent2 = fparent2, nullid
1860 1860 elif fparent2 != nullid:
1861 1861 # is one parent an ancestor of the other?
1862 1862 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1863 1863 if fparent1 in fparentancestors:
1864 1864 fparent1, fparent2 = fparent2, nullid
1865 1865 elif fparent2 in fparentancestors:
1866 1866 fparent2 = nullid
1867 1867
1868 1868 # is the file changed?
1869 1869 text = fctx.data()
1870 1870 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1871 1871 changelist.append(fname)
1872 1872 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1873 1873 # are just the flags changed during merge?
1874 1874 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1875 1875 changelist.append(fname)
1876 1876
1877 1877 return fparent1
1878 1878
1879 1879 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1880 1880 """check for commit arguments that aren't committable"""
1881 1881 if match.isexact() or match.prefix():
1882 1882 matched = set(status.modified + status.added + status.removed)
1883 1883
1884 1884 for f in match.files():
1885 1885 f = self.dirstate.normalize(f)
1886 1886 if f == '.' or f in matched or f in wctx.substate:
1887 1887 continue
1888 1888 if f in status.deleted:
1889 1889 fail(f, _('file not found!'))
1890 1890 if f in vdirs: # visited directory
1891 1891 d = f + '/'
1892 1892 for mf in matched:
1893 1893 if mf.startswith(d):
1894 1894 break
1895 1895 else:
1896 1896 fail(f, _("no match under directory!"))
1897 1897 elif f not in self.dirstate:
1898 1898 fail(f, _("file not tracked!"))
1899 1899
1900 1900 @unfilteredmethod
1901 1901 def commit(self, text="", user=None, date=None, match=None, force=False,
1902 1902 editor=False, extra=None):
1903 1903 """Add a new revision to current repository.
1904 1904
1905 1905 Revision information is gathered from the working directory,
1906 1906 match can be used to filter the committed files. If editor is
1907 1907 supplied, it is called to get a commit message.
1908 1908 """
1909 1909 if extra is None:
1910 1910 extra = {}
1911 1911
1912 1912 def fail(f, msg):
1913 1913 raise error.Abort('%s: %s' % (f, msg))
1914 1914
1915 1915 if not match:
1916 1916 match = matchmod.always(self.root, '')
1917 1917
1918 1918 if not force:
1919 1919 vdirs = []
1920 1920 match.explicitdir = vdirs.append
1921 1921 match.bad = fail
1922 1922
1923 1923 wlock = lock = tr = None
1924 1924 try:
1925 1925 wlock = self.wlock()
1926 1926 lock = self.lock() # for recent changelog (see issue4368)
1927 1927
1928 1928 wctx = self[None]
1929 1929 merge = len(wctx.parents()) > 1
1930 1930
1931 1931 if not force and merge and not match.always():
1932 1932 raise error.Abort(_('cannot partially commit a merge '
1933 1933 '(do not specify files or patterns)'))
1934 1934
1935 1935 status = self.status(match=match, clean=force)
1936 1936 if force:
1937 1937 status.modified.extend(status.clean) # mq may commit clean files
1938 1938
1939 1939 # check subrepos
1940 1940 subs, commitsubs, newstate = subrepoutil.precommit(
1941 1941 self.ui, wctx, status, match, force=force)
1942 1942
1943 1943 # make sure all explicit patterns are matched
1944 1944 if not force:
1945 1945 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1946 1946
1947 1947 cctx = context.workingcommitctx(self, status,
1948 1948 text, user, date, extra)
1949 1949
1950 1950 # internal config: ui.allowemptycommit
1951 1951 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1952 1952 or extra.get('close') or merge or cctx.files()
1953 1953 or self.ui.configbool('ui', 'allowemptycommit'))
1954 1954 if not allowemptycommit:
1955 1955 return None
1956 1956
1957 1957 if merge and cctx.deleted():
1958 1958 raise error.Abort(_("cannot commit merge with missing files"))
1959 1959
1960 1960 ms = mergemod.mergestate.read(self)
1961 1961 mergeutil.checkunresolved(ms)
1962 1962
1963 1963 if editor:
1964 1964 cctx._text = editor(self, cctx, subs)
1965 1965 edited = (text != cctx._text)
1966 1966
1967 1967 # Save commit message in case this transaction gets rolled back
1968 1968 # (e.g. by a pretxncommit hook). Leave the content alone on
1969 1969 # the assumption that the user will use the same editor again.
1970 1970 msgfn = self.savecommitmessage(cctx._text)
1971 1971
1972 1972 # commit subs and write new state
1973 1973 if subs:
1974 1974 for s in sorted(commitsubs):
1975 1975 sub = wctx.sub(s)
1976 1976 self.ui.status(_('committing subrepository %s\n') %
1977 1977 subrepoutil.subrelpath(sub))
1978 1978 sr = sub.commit(cctx._text, user, date)
1979 1979 newstate[s] = (newstate[s][0], sr)
1980 1980 subrepoutil.writestate(self, newstate)
1981 1981
1982 1982 p1, p2 = self.dirstate.parents()
1983 1983 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1984 1984 try:
1985 1985 self.hook("precommit", throw=True, parent1=hookp1,
1986 1986 parent2=hookp2)
1987 1987 tr = self.transaction('commit')
1988 1988 ret = self.commitctx(cctx, True)
1989 1989 except: # re-raises
1990 1990 if edited:
1991 1991 self.ui.write(
1992 1992 _('note: commit message saved in %s\n') % msgfn)
1993 1993 raise
1994 1994 # update bookmarks, dirstate and mergestate
1995 1995 bookmarks.update(self, [p1, p2], ret)
1996 1996 cctx.markcommitted(ret)
1997 1997 ms.reset()
1998 1998 tr.close()
1999 1999
2000 2000 finally:
2001 2001 lockmod.release(tr, lock, wlock)
2002 2002
2003 2003 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2004 2004 # hack for commands that use a temporary commit (e.g. histedit):
2005 2005 # the temporary commit may have been stripped before the hook runs
2006 2006 if self.changelog.hasnode(ret):
2007 2007 self.hook("commit", node=node, parent1=parent1,
2008 2008 parent2=parent2)
2009 2009 self._afterlock(commithook)
2010 2010 return ret
2011 2011
2012 2012 @unfilteredmethod
2013 2013 def commitctx(self, ctx, error=False):
2014 2014 """Add a new revision to current repository.
2015 2015 Revision information is passed via the context argument.
2016 2016 """
2017 2017
2018 2018 tr = None
2019 2019 p1, p2 = ctx.p1(), ctx.p2()
2020 2020 user = ctx.user()
2021 2021
2022 2022 lock = self.lock()
2023 2023 try:
2024 2024 tr = self.transaction("commit")
2025 2025 trp = weakref.proxy(tr)
2026 2026
2027 2027 if ctx.manifestnode():
2028 2028 # reuse an existing manifest revision
2029 2029 mn = ctx.manifestnode()
2030 2030 files = ctx.files()
2031 2031 elif ctx.files():
2032 2032 m1ctx = p1.manifestctx()
2033 2033 m2ctx = p2.manifestctx()
2034 2034 mctx = m1ctx.copy()
2035 2035
2036 2036 m = mctx.read()
2037 2037 m1 = m1ctx.read()
2038 2038 m2 = m2ctx.read()
2039 2039
2040 2040 # check in files
2041 2041 added = []
2042 2042 changed = []
2043 2043 removed = list(ctx.removed())
2044 2044 linkrev = len(self)
2045 2045 self.ui.note(_("committing files:\n"))
2046 2046 for f in sorted(ctx.modified() + ctx.added()):
2047 2047 self.ui.note(f + "\n")
2048 2048 try:
2049 2049 fctx = ctx[f]
2050 2050 if fctx is None:
2051 2051 removed.append(f)
2052 2052 else:
2053 2053 added.append(f)
2054 2054 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2055 2055 trp, changed)
2056 2056 m.setflag(f, fctx.flags())
2057 2057 except OSError as inst:
2058 2058 self.ui.warn(_("trouble committing %s!\n") % f)
2059 2059 raise
2060 2060 except IOError as inst:
2061 2061 errcode = getattr(inst, 'errno', errno.ENOENT)
2062 2062 if error or errcode and errcode != errno.ENOENT:
2063 2063 self.ui.warn(_("trouble committing %s!\n") % f)
2064 2064 raise
2065 2065
2066 2066 # update manifest
2067 2067 self.ui.note(_("committing manifest\n"))
2068 2068 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2069 2069 drop = [f for f in removed if f in m]
2070 2070 for f in drop:
2071 2071 del m[f]
2072 2072 mn = mctx.write(trp, linkrev,
2073 2073 p1.manifestnode(), p2.manifestnode(),
2074 2074 added, drop)
2075 2075 files = changed + removed
2076 2076 else:
2077 2077 mn = p1.manifestnode()
2078 2078 files = []
2079 2079
2080 2080 # update changelog
2081 2081 self.ui.note(_("committing changelog\n"))
2082 2082 self.changelog.delayupdate(tr)
2083 2083 n = self.changelog.add(mn, files, ctx.description(),
2084 2084 trp, p1.node(), p2.node(),
2085 2085 user, ctx.date(), ctx.extra().copy())
2086 2086 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2087 2087 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2088 2088 parent2=xp2)
2089 2089 # set the new commit in its proper phase
2090 2090 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2091 2091 if targetphase:
2092 2092 # retracting the boundary does not alter parent changesets;
2093 2093 # if a parent has a higher phase, the resulting phase will
2094 2094 # be compliant anyway
2095 2095 #
2096 2096 # if minimal phase was 0 we don't need to retract anything
2097 2097 phases.registernew(self, tr, targetphase, [n])
2098 2098 tr.close()
2099 2099 return n
2100 2100 finally:
2101 2101 if tr:
2102 2102 tr.release()
2103 2103 lock.release()
2104 2104
2105 2105 @unfilteredmethod
2106 2106 def destroying(self):
2107 2107 '''Inform the repository that nodes are about to be destroyed.
2108 2108 Intended for use by strip and rollback, so there's a common
2109 2109 place for anything that has to be done before destroying history.
2110 2110
2111 2111 This is mostly useful for saving state that is in memory and waiting
2112 2112 to be flushed when the current lock is released. Because a call to
2113 2113 destroyed is imminent, the repo will be invalidated causing those
2114 2114 changes to stay in memory (waiting for the next unlock), or vanish
2115 2115 completely.
2116 2116 '''
2117 2117 # When using the same lock to commit and strip, the phasecache is left
2118 2118 # dirty after committing. Then when we strip, the repo is invalidated,
2119 2119 # causing those changes to disappear.
2120 2120 if '_phasecache' in vars(self):
2121 2121 self._phasecache.write()
2122 2122
2123 2123 @unfilteredmethod
2124 2124 def destroyed(self):
2125 2125 '''Inform the repository that nodes have been destroyed.
2126 2126 Intended for use by strip and rollback, so there's a common
2127 2127 place for anything that has to be done after destroying history.
2128 2128 '''
2129 2129 # When one tries to:
2130 2130 # 1) destroy nodes thus calling this method (e.g. strip)
2131 2131 # 2) use phasecache somewhere (e.g. commit)
2132 2132 #
2133 2133 # then 2) will fail because the phasecache contains nodes that were
2134 2134 # removed. We can either remove phasecache from the filecache,
2135 2135 # causing it to reload next time it is accessed, or simply filter
2136 2136 # the removed nodes now and write the updated cache.
2137 2137 self._phasecache.filterunknown(self)
2138 2138 self._phasecache.write()
2139 2139
2140 2140 # refresh all repository caches
2141 2141 self.updatecaches()
2142 2142
2143 2143 # Ensure the persistent tag cache is updated. Doing it now
2144 2144 # means that the tag cache only has to worry about destroyed
2145 2145 # heads immediately after a strip/rollback. That in turn
2146 2146 # guarantees that "cachetip == currenttip" (comparing both rev
2147 2147 # and node) always means no nodes have been added or destroyed.
2148 2148
2149 2149 # XXX this is suboptimal when qrefresh'ing: we strip the current
2150 2150 # head, refresh the tag cache, then immediately add a new head.
2151 2151 # But I think doing it this way is necessary for the "instant
2152 2152 # tag cache retrieval" case to work.
2153 2153 self.invalidate()
2154 2154
2155 2155 def status(self, node1='.', node2=None, match=None,
2156 2156 ignored=False, clean=False, unknown=False,
2157 2157 listsubrepos=False):
2158 2158 '''a convenience method that calls node1.status(node2)'''
2159 2159 return self[node1].status(node2, match, ignored, clean, unknown,
2160 2160 listsubrepos)
2161 2161
2162 2162 def addpostdsstatus(self, ps):
2163 2163 """Add a callback to run within the wlock, at the point at which status
2164 2164 fixups happen.
2165 2165
2166 2166 On status completion, callback(wctx, status) will be called with the
2167 2167 wlock held, unless the dirstate has changed from underneath or the wlock
2168 2168 couldn't be grabbed.
2169 2169
2170 2170 Callbacks should not capture and use a cached copy of the dirstate --
2171 2171 it might change in the meanwhile. Instead, they should access the
2172 2172 dirstate via wctx.repo().dirstate.
2173 2173
2174 2174 This list is emptied out after each status run -- extensions should
2175 2175 make sure they add to this list each time dirstate.status is called.
2176 2176 Extensions should also make sure they don't call this for statuses
2177 2177 that don't involve the dirstate.
2178 2178 """
2179 2179
2180 2180 # The list is located here for uniqueness reasons -- it is actually
2181 2181 # managed by the workingctx, but that isn't unique per-repo.
2182 2182 self._postdsstatus.append(ps)
2183 2183
2184 2184 def postdsstatus(self):
2185 2185 """Used by workingctx to get the list of post-dirstate-status hooks."""
2186 2186 return self._postdsstatus
2187 2187
2188 2188 def clearpostdsstatus(self):
2189 2189 """Used by workingctx to clear post-dirstate-status hooks."""
2190 2190 del self._postdsstatus[:]
2191 2191
2192 2192 def heads(self, start=None):
2193 2193 if start is None:
2194 2194 cl = self.changelog
2195 2195 headrevs = reversed(cl.headrevs())
2196 2196 return [cl.node(rev) for rev in headrevs]
2197 2197
2198 2198 heads = self.changelog.heads(start)
2199 2199 # sort the output in rev descending order
2200 2200 return sorted(heads, key=self.changelog.rev, reverse=True)
2201 2201
2202 2202 def branchheads(self, branch=None, start=None, closed=False):
2203 2203 '''return a (possibly filtered) list of heads for the given branch
2204 2204
2205 2205 Heads are returned in topological order, from newest to oldest.
2206 2206 If branch is None, use the dirstate branch.
2207 2207 If start is not None, return only heads reachable from start.
2208 2208 If closed is True, return heads that are marked as closed as well.
2209 2209 '''
2210 2210 if branch is None:
2211 2211 branch = self[None].branch()
2212 2212 branches = self.branchmap()
2213 2213 if branch not in branches:
2214 2214 return []
2215 2215 # the cache returns heads ordered lowest to highest
2216 2216 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2217 2217 if start is not None:
2218 2218 # filter out the heads that cannot be reached from startrev
2219 2219 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2220 2220 bheads = [h for h in bheads if h in fbheads]
2221 2221 return bheads
2222 2222
2223 2223 def branches(self, nodes):
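# For each node, follow first parents until a merge or the root is
# reached, emitting (chain head, chain end, p1, p2) tuples; part of
# the legacy wire protocol.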
2224 2224 if not nodes:
2225 2225 nodes = [self.changelog.tip()]
2226 2226 b = []
2227 2227 for n in nodes:
2228 2228 t = n
2229 2229 while True:
2230 2230 p = self.changelog.parents(n)
2231 2231 if p[1] != nullid or p[0] == nullid:
2232 2232 b.append((t, n, p[0], p[1]))
2233 2233 break
2234 2234 n = p[0]
2235 2235 return b
2236 2236
2237 2237 def between(self, pairs):
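# For each (top, bottom) pair, walk first parents from top towards
# bottom, sampling nodes at exponentially growing distances
# (1, 2, 4, ...); the legacy discovery protocol uses this to narrow
# down where two repositories diverge in few round trips.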
2238 2238 r = []
2239 2239
2240 2240 for top, bottom in pairs:
2241 2241 n, l, i = top, [], 0
2242 2242 f = 1
2243 2243
2244 2244 while n != bottom and n != nullid:
2245 2245 p = self.changelog.parents(n)[0]
2246 2246 if i == f:
2247 2247 l.append(n)
2248 2248 f = f * 2
2249 2249 n = p
2250 2250 i += 1
2251 2251
2252 2252 r.append(l)
2253 2253
2254 2254 return r
2255 2255
2256 2256 def checkpush(self, pushop):
2257 2257 """Extensions can override this function if additional checks have
2258 2258 to be performed before pushing, or call it if they override push
2259 2259 command.
2260 2260 """
2261 2261
2262 2262 @unfilteredpropertycache
2263 2263 def prepushoutgoinghooks(self):
2264 2264 """Return util.hooks consists of a pushop with repo, remote, outgoing
2265 2265 methods, which are called before pushing changesets.
2266 2266 """
2267 2267 return util.hooks()
2268 2268
2269 2269 def pushkey(self, namespace, key, old, new):
2270 2270 try:
2271 2271 tr = self.currenttransaction()
2272 2272 hookargs = {}
2273 2273 if tr is not None:
2274 2274 hookargs.update(tr.hookargs)
2275 2275 hookargs = pycompat.strkwargs(hookargs)
2276 2276 hookargs[r'namespace'] = namespace
2277 2277 hookargs[r'key'] = key
2278 2278 hookargs[r'old'] = old
2279 2279 hookargs[r'new'] = new
2280 2280 self.hook('prepushkey', throw=True, **hookargs)
2281 2281 except error.HookAbort as exc:
2282 2282 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2283 2283 if exc.hint:
2284 2284 self.ui.write_err(_("(%s)\n") % exc.hint)
2285 2285 return False
2286 2286 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2287 2287 ret = pushkey.push(self, namespace, key, old, new)
2288 2288 def runhook():
2289 2289 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2290 2290 ret=ret)
2291 2291 self._afterlock(runhook)
2292 2292 return ret
2293 2293
2294 2294 def listkeys(self, namespace):
2295 2295 self.hook('prelistkeys', throw=True, namespace=namespace)
2296 2296 self.ui.debug('listing keys for "%s"\n' % namespace)
2297 2297 values = pushkey.list(self, namespace)
2298 2298 self.hook('listkeys', namespace=namespace, values=values)
2299 2299 return values
2300 2300
2301 2301 def debugwireargs(self, one, two, three=None, four=None, five=None):
2302 2302 '''used to test argument passing over the wire'''
2303 2303 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2304 2304 pycompat.bytestr(four),
2305 2305 pycompat.bytestr(five))
2306 2306
2307 2307 def savecommitmessage(self, text):
2308 2308 fp = self.vfs('last-message.txt', 'wb')
2309 2309 try:
2310 2310 fp.write(text)
2311 2311 finally:
2312 2312 fp.close()
2313 2313 return self.pathto(fp.name[len(self.root) + 1:])
2314 2314
2315 2315 # used to avoid circular references so destructors work
2316 2316 def aftertrans(files):
2317 2317 renamefiles = [tuple(t) for t in files]
2318 2318 def a():
2319 2319 for vfs, src, dest in renamefiles:
2320 2320 # if src and dest refer to the same file, vfs.rename is a no-op,
2321 2321 # leaving both src and dest on disk. delete dest to make sure
2322 2322 # the rename cannot be such a no-op.
2323 2323 vfs.tryunlink(dest)
2324 2324 try:
2325 2325 vfs.rename(src, dest)
2326 2326 except OSError: # journal file does not yet exist
2327 2327 pass
2328 2328 return a
2329 2329
2330 2330 def undoname(fn):
2331 2331 base, name = os.path.split(fn)
2332 2332 assert name.startswith('journal')
2333 2333 return os.path.join(base, name.replace('journal', 'undo', 1))
2334 2334
2335 def instance(ui, path, create):
2336 return localrepository(ui, util.urllocalpath(path), create)
2335 def instance(ui, path, create, intents=None):
2336 return localrepository(ui, util.urllocalpath(path), create,
2337 intents=intents)
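Repository and peer types provided by extensions need the same one-line change to stay compatible. A minimal sketch, assuming a hypothetical ``myrepository`` class (only the ``intents=None`` keyword comes from this patch)::

    def instance(ui, path, create, intents=None):
        # Accept the new keyword and forward it so command intents can
        # flow through to the eventual repository object.
        return myrepository(ui, util.urllocalpath(path), create,
                            intents=intents)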
2337 2338
2338 2339 def islocal(path):
2339 2340 return True
2340 2341
2341 2342 def newreporequirements(repo):
2342 2343 """Determine the set of requirements for a new local repository.
2343 2344
2344 2345 Extensions can wrap this function to specify custom requirements for
2345 2346 new repositories.
2346 2347 """
2347 2348 ui = repo.ui
2348 2349 requirements = {'revlogv1'}
2349 2350 if ui.configbool('format', 'usestore'):
2350 2351 requirements.add('store')
2351 2352 if ui.configbool('format', 'usefncache'):
2352 2353 requirements.add('fncache')
2353 2354 if ui.configbool('format', 'dotencode'):
2354 2355 requirements.add('dotencode')
2355 2356
2356 2357 compengine = ui.config('experimental', 'format.compression')
2357 2358 if compengine not in util.compengines:
2358 2359 raise error.Abort(_('compression engine %s defined by '
2359 2360 'experimental.format.compression not available') %
2360 2361 compengine,
2361 2362 hint=_('run "hg debuginstall" to list available '
2362 2363 'compression engines'))
2363 2364
2364 2365 # zlib is the historical default and doesn't need an explicit requirement.
2365 2366 if compengine != 'zlib':
2366 2367 requirements.add('exp-compression-%s' % compengine)
2367 2368
2368 2369 if scmutil.gdinitconfig(ui):
2369 2370 requirements.add('generaldelta')
2370 2371 if ui.configbool('experimental', 'treemanifest'):
2371 2372 requirements.add('treemanifest')
2372 2373
2373 2374 revlogv2 = ui.config('experimental', 'revlogv2')
2374 2375 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2375 2376 requirements.remove('revlogv1')
2376 2377 # generaldelta is implied by revlogv2.
2377 2378 requirements.discard('generaldelta')
2378 2379 requirements.add(REVLOGV2_REQUIREMENT)
2379 2380
2380 2381 return requirements
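These requirements map directly onto configuration. A hedged example of an hgrc exercising the branches above (assuming the running build ships a zstd engine, which would add an ``exp-compression-zstd`` requirement)::

    [format]
    usestore = yes
    usefncache = yes
    dotencode = yes

    [experimental]
    format.compression = zstd
    treemanifest = yes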
@@ -1,636 +1,636 b''
1 1 # sshpeer.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11 import uuid
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 error,
16 16 pycompat,
17 17 util,
18 18 wireproto,
19 19 wireprotoserver,
20 20 wireprototypes,
21 21 wireprotov1peer,
22 22 )
23 23 from .utils import (
24 24 procutil,
25 25 )
26 26
27 27 def _serverquote(s):
28 28 """quote a string for the remote shell ... which we assume is sh"""
29 29 if not s:
30 30 return s
31 31 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
32 32 return s
33 33 return "'%s'" % s.replace("'", "'\\''")
34 34
35 35 def _forwardoutput(ui, pipe):
36 36 """display all data currently available on pipe as remote output.
37 37
38 38 This is non blocking."""
39 39 if pipe:
40 40 s = procutil.readpipe(pipe)
41 41 if s:
42 42 for l in s.splitlines():
43 43 ui.status(_("remote: "), l, '\n')
44 44
45 45 class doublepipe(object):
46 46 """Operate a side-channel pipe in addition of a main one
47 47
48 48 The side-channel pipe contains server output to be forwarded to the
49 49 user. The double pipe behaves like the "main" pipe, but ensures that
50 50 the content of the "side" pipe is properly processed while we wait for
51 51 a blocking call on the "main" pipe.
52 52
53 53 If large amounts of data are read from "main", forwarding will cease
54 54 after the first bytes start to appear. This simplifies the
55 55 implementation without affecting the actual output of sshpeer too much,
56 56 as we rarely issue large reads for data not yet emitted by the server.
57 57
58 58 The main pipe is expected to be a 'bufferedinputpipe' from the util module
59 59 that handles all the OS-specific bits. This class lives in this module
60 60 because it focuses on behavior specific to the SSH protocol."""
61 61
62 62 def __init__(self, ui, main, side):
63 63 self._ui = ui
64 64 self._main = main
65 65 self._side = side
66 66
67 67 def _wait(self):
68 68 """wait until some data are available on main or side
69 69
70 70 return a pair of boolean (ismainready, issideready)
71 71
72 72 (This will only wait for data if the setup is supported by `util.poll`)
73 73 """
74 74 if (isinstance(self._main, util.bufferedinputpipe) and
75 75 self._main.hasbuffer):
76 76 # Main has data. Assume side is worth poking at.
77 77 return True, True
78 78
79 79 fds = [self._main.fileno(), self._side.fileno()]
80 80 try:
81 81 act = util.poll(fds)
82 82 except NotImplementedError:
83 83 # not-yet-supported case; assume all have data.
84 84 act = fds
85 85 return (self._main.fileno() in act, self._side.fileno() in act)
86 86
87 87 def write(self, data):
88 88 return self._call('write', data)
89 89
90 90 def read(self, size):
91 91 r = self._call('read', size)
92 92 if size != 0 and not r:
93 93 # We've observed a condition that indicates the
94 94 # stdout closed unexpectedly. Check stderr one
95 95 # more time and snag anything that's there before
96 96 # letting anyone know the main part of the pipe
97 97 # closed prematurely.
98 98 _forwardoutput(self._ui, self._side)
99 99 return r
100 100
101 101 def readline(self):
102 102 return self._call('readline')
103 103
104 104 def _call(self, methname, data=None):
105 105 """call <methname> on "main", forward output of "side" while blocking
106 106 """
107 107 # data can be '' or 0
108 108 if (data is not None and not data) or self._main.closed:
109 109 _forwardoutput(self._ui, self._side)
110 110 return ''
111 111 while True:
112 112 mainready, sideready = self._wait()
113 113 if sideready:
114 114 _forwardoutput(self._ui, self._side)
115 115 if mainready:
116 116 meth = getattr(self._main, methname)
117 117 if data is None:
118 118 return meth()
119 119 else:
120 120 return meth(data)
121 121
122 122 def close(self):
123 123 return self._main.close()
124 124
125 125 def flush(self):
126 126 return self._main.flush()
127 127
128 128 def _cleanuppipes(ui, pipei, pipeo, pipee):
129 129 """Clean up pipes used by an SSH connection."""
130 130 if pipeo:
131 131 pipeo.close()
132 132 if pipei:
133 133 pipei.close()
134 134
135 135 if pipee:
136 136 # Try to read from the err descriptor until EOF.
137 137 try:
138 138 for l in pipee:
139 139 ui.status(_('remote: '), l)
140 140 except (IOError, ValueError):
141 141 pass
142 142
143 143 pipee.close()
144 144
145 145 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
146 146 """Create an SSH connection to a server.
147 147
148 148 Returns a tuple of (process, stdin, stdout, stderr) for the
149 149 spawned process.
150 150 """
151 151 cmd = '%s %s %s' % (
152 152 sshcmd,
153 153 args,
154 154 procutil.shellquote('%s -R %s serve --stdio' % (
155 155 _serverquote(remotecmd), _serverquote(path))))
156 156
157 157 ui.debug('running %s\n' % cmd)
158 158 cmd = procutil.quotecommand(cmd)
159 159
160 160 # unbuffered pipes allow the use of 'select'
161 161 # feel free to remove buffering and select usage when we ultimately
162 162 # move to threading.
163 163 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
164 164
165 165 return proc, stdin, stdout, stderr
166 166
167 167 def _clientcapabilities():
168 168 """Return list of capabilities of this client.
169 169
170 170 Returns a list of capabilities that are supported by this client.
171 171 """
172 172 protoparams = {'partial-pull'}
173 173 comps = [e.wireprotosupport().name for e in
174 174 util.compengines.supportedwireengines(util.CLIENTROLE)]
175 175 protoparams.add('comp=%s' % ','.join(comps))
176 176 return protoparams
177 177
178 178 def _performhandshake(ui, stdin, stdout, stderr):
179 179 def badresponse():
180 180 # Flush any output on stderr.
181 181 _forwardoutput(ui, stderr)
182 182
183 183 msg = _('no suitable response from remote hg')
184 184 hint = ui.config('ui', 'ssherrorhint')
185 185 raise error.RepoError(msg, hint=hint)
186 186
187 187 # The handshake consists of sending wire protocol commands in reverse
188 188 # order of protocol implementation and then sniffing for a response
189 189 # to one of them.
190 190 #
191 191 # Those commands (from oldest to newest) are:
192 192 #
193 193 # ``between``
194 194 # Asks for the set of revisions between a pair of revisions. Command
195 195 # present in all Mercurial server implementations.
196 196 #
197 197 # ``hello``
198 198 # Instructs the server to advertise its capabilities. Introduced in
199 199 # Mercurial 0.9.1.
200 200 #
201 201 # ``upgrade``
202 202 # Requests upgrade from default transport protocol version 1 to
203 203 # a newer version. Introduced in Mercurial 4.6 as an experimental
204 204 # feature.
205 205 #
206 206 # The ``between`` command is issued with a request for the null
207 207 # range. If the remote is a Mercurial server, this request will
208 208 # generate a specific response: ``1\n\n``. This represents the
209 209 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
210 210 # in the output stream and know this is the response to ``between``
211 211 # and we're at the end of our handshake reply.
212 212 #
213 213 # The response to the ``hello`` command will be a line with the
214 214 # length of the value returned by that command followed by that
215 215 # value. If the server doesn't support ``hello`` (which should be
216 216 # rare), that line will be ``0\n``. Otherwise, the value will contain
217 217 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
218 218 # the capabilities of the server.
219 219 #
220 220 # The ``upgrade`` command isn't really a command in the traditional
221 221 # sense of version 1 of the transport because it isn't using the
222 222 # proper mechanism for formatting: instead, it just encodes
223 223 # arguments on the line, delimited by spaces.
224 224 #
225 225 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
226 226 # If the server doesn't support protocol upgrades, it will reply to
227 227 # this line with ``0\n``. Otherwise, it emits an
228 228 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
229 229 # Content immediately following this line describes additional
230 230 # protocol and server state.
231 231 #
232 232 # In addition to the responses to our command requests, the server
233 233 # may emit "banner" output on stdout. SSH servers are allowed to
234 234 # print messages to stdout on login. Issuing commands on connection
235 235 # allows us to flush this banner output from the server by scanning
236 236 # for output to our well-known ``between`` command. Of course, if
237 237 # the banner contains ``1\n\n``, this will throw off our detection.
238 238
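# For illustration only (reply length, banner and capability names
# invented), a version 1 exchange sniffed by the loop below might be:
#
#   client -> server:  hello\n
#                      between\n
#                      pairs 81\n
#                      <40 zeros>-<40 zeros>
#
#   server -> client:  384\n                 length of the hello value
#                      capabilities: lookup branchmap pushkey ...\n
#                      1\n
#                      \n                    end of the between reply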
239 239 requestlog = ui.configbool('devel', 'debug.peer-request')
240 240
241 241 # Generate a random token to help identify responses to version 2
242 242 # upgrade request.
243 243 token = pycompat.sysbytes(str(uuid.uuid4()))
244 244 upgradecaps = [
245 245 ('proto', wireprotoserver.SSHV2),
246 246 ]
247 247 upgradecaps = util.urlreq.urlencode(upgradecaps)
248 248
249 249 try:
250 250 pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
251 251 handshake = [
252 252 'hello\n',
253 253 'between\n',
254 254 'pairs %d\n' % len(pairsarg),
255 255 pairsarg,
256 256 ]
257 257
258 258 # Request upgrade to version 2 if configured.
259 259 if ui.configbool('experimental', 'sshpeer.advertise-v2'):
260 260 ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
261 261 handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
262 262
263 263 if requestlog:
264 264 ui.debug('devel-peer-request: hello\n')
265 265 ui.debug('sending hello command\n')
266 266 if requestlog:
267 267 ui.debug('devel-peer-request: between\n')
268 268 ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
269 269 ui.debug('sending between command\n')
270 270
271 271 stdin.write(''.join(handshake))
272 272 stdin.flush()
273 273 except IOError:
274 274 badresponse()
275 275
276 276 # Assume version 1 of wire protocol by default.
277 277 protoname = wireprototypes.SSHV1
278 278 reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
279 279
280 280 lines = ['', 'dummy']
281 281 max_noise = 500
282 282 while lines[-1] and max_noise:
283 283 try:
284 284 l = stdout.readline()
285 285 _forwardoutput(ui, stderr)
286 286
287 287 # Look for reply to protocol upgrade request. It has a token
288 288 # in it, so there should be no false positives.
289 289 m = reupgraded.match(l)
290 290 if m:
291 291 protoname = m.group(1)
292 292 ui.debug('protocol upgraded to %s\n' % protoname)
293 293 # If an upgrade was handled, the ``hello`` and ``between``
294 294 # requests are ignored. The next output belongs to the
295 295 # protocol, so stop scanning lines.
296 296 break
297 297
298 298 # Otherwise it could be a banner, or a ``0\n`` response if the
299 299 # server doesn't support the upgrade.
300 300
301 301 if lines[-1] == '1\n' and l == '\n':
302 302 break
303 303 if l:
304 304 ui.debug('remote: ', l)
305 305 lines.append(l)
306 306 max_noise -= 1
307 307 except IOError:
308 308 badresponse()
309 309 else:
310 310 badresponse()
311 311
312 312 caps = set()
313 313
314 314 # For version 1, we should see a ``capabilities`` line in response to the
315 315 # ``hello`` command.
316 316 if protoname == wireprototypes.SSHV1:
317 317 for l in reversed(lines):
318 318 # Look for response to ``hello`` command. Scan from the back so
319 319 # we don't misinterpret banner output as the command reply.
320 320 if l.startswith('capabilities:'):
321 321 caps.update(l[:-1].split(':')[1].split())
322 322 break
323 323 elif protoname == wireprotoserver.SSHV2:
324 324 # We see a line with the number of bytes to follow and then a value
325 325 # looking like ``capabilities: *``.
326 326 line = stdout.readline()
327 327 try:
328 328 valuelen = int(line)
329 329 except ValueError:
330 330 badresponse()
331 331
332 332 capsline = stdout.read(valuelen)
333 333 if not capsline.startswith('capabilities: '):
334 334 badresponse()
335 335
336 336 ui.debug('remote: %s\n' % capsline)
337 337
338 338 caps.update(capsline.split(':')[1].split())
339 339 # Trailing newline.
340 340 stdout.read(1)
341 341
342 342 # Error if we couldn't find capabilities, this means:
343 343 #
344 344 # 1. Remote isn't a Mercurial server
345 345 # 2. Remote is a <0.9.1 Mercurial server
346 346 # 3. Remote is a future Mercurial server that dropped ``hello``
347 347 # and other attempted handshake mechanisms.
348 348 if not caps:
349 349 badresponse()
350 350
351 351 # Flush any output on stderr before proceeding.
352 352 _forwardoutput(ui, stderr)
353 353
354 354 return protoname, caps
355 355
356 356 class sshv1peer(wireprotov1peer.wirepeer):
357 357 def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
358 358 autoreadstderr=True):
359 359 """Create a peer from an existing SSH connection.
360 360
361 361 ``proc`` is a handle on the underlying SSH process.
362 362 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
363 363 pipes for that process.
364 364 ``caps`` is a set of capabilities supported by the remote.
365 365 ``autoreadstderr`` denotes whether to automatically read from
366 366 stderr and to forward its output.
367 367 """
368 368 self._url = url
369 369 self.ui = ui
370 370 # self._subprocess is unused. Keeping a handle on the process
371 371 # holds a reference and prevents it from being garbage collected.
372 372 self._subprocess = proc
373 373
374 374 # And we hook up our "doublepipe" wrapper to allow querying
375 375 # stderr any time we perform I/O.
376 376 if autoreadstderr:
377 377 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
378 378 stdin = doublepipe(ui, stdin, stderr)
379 379
380 380 self._pipeo = stdin
381 381 self._pipei = stdout
382 382 self._pipee = stderr
383 383 self._caps = caps
384 384 self._autoreadstderr = autoreadstderr
385 385
386 386 # Commands that have a "framed" response where the first line of the
387 387 # response contains the length of that response.
388 388 _FRAMED_COMMANDS = {
389 389 'batch',
390 390 }
391 391
392 392 # Begin of ipeerconnection interface.
393 393
394 394 def url(self):
395 395 return self._url
396 396
397 397 def local(self):
398 398 return None
399 399
400 400 def peer(self):
401 401 return self
402 402
403 403 def canpush(self):
404 404 return True
405 405
406 406 def close(self):
407 407 pass
408 408
409 409 # End of ipeerconnection interface.
410 410
411 411 # Begin of ipeercommands interface.
412 412
413 413 def capabilities(self):
414 414 return self._caps
415 415
416 416 # End of ipeercommands interface.
417 417
418 418 def _readerr(self):
419 419 _forwardoutput(self.ui, self._pipee)
420 420
421 421 def _abort(self, exception):
422 422 self._cleanup()
423 423 raise exception
424 424
425 425 def _cleanup(self):
426 426 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
427 427
428 428 __del__ = _cleanup
429 429
430 430 def _sendrequest(self, cmd, args, framed=False):
431 431 if (self.ui.debugflag
432 432 and self.ui.configbool('devel', 'debug.peer-request')):
433 433 dbg = self.ui.debug
434 434 line = 'devel-peer-request: %s\n'
435 435 dbg(line % cmd)
436 436 for key, value in sorted(args.items()):
437 437 if not isinstance(value, dict):
438 438 dbg(line % ' %s: %d bytes' % (key, len(value)))
439 439 else:
440 440 for dk, dv in sorted(value.items()):
441 441 dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
442 442 self.ui.debug("sending %s command\n" % cmd)
443 443 self._pipeo.write("%s\n" % cmd)
444 444 _func, names = wireproto.commands[cmd]
445 445 keys = names.split()
446 446 wireargs = {}
447 447 for k in keys:
448 448 if k == '*':
449 449 wireargs['*'] = args
450 450 break
451 451 else:
452 452 wireargs[k] = args[k]
453 453 del args[k]
454 454 for k, v in sorted(wireargs.iteritems()):
455 455 self._pipeo.write("%s %d\n" % (k, len(v)))
456 456 if isinstance(v, dict):
457 457 for dk, dv in v.iteritems():
458 458 self._pipeo.write("%s %d\n" % (dk, len(dv)))
459 459 self._pipeo.write(dv)
460 460 else:
461 461 self._pipeo.write(v)
462 462 self._pipeo.flush()
463 463
464 464 # We know exactly how many bytes are in the response. So return a proxy
465 465 # around the raw output stream that allows reading exactly this many
466 466 # bytes. Callers then can read() without fear of overrunning the
467 467 # response.
468 468 if framed:
469 469 amount = self._getamount()
470 470 return util.cappedreader(self._pipei, amount)
471 471
472 472 return self._pipei
473 473
474 474 def _callstream(self, cmd, **args):
475 475 args = pycompat.byteskwargs(args)
476 476 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
477 477
478 478 def _callcompressable(self, cmd, **args):
479 479 args = pycompat.byteskwargs(args)
480 480 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
481 481
482 482 def _call(self, cmd, **args):
483 483 args = pycompat.byteskwargs(args)
484 484 return self._sendrequest(cmd, args, framed=True).read()
485 485
486 486 def _callpush(self, cmd, fp, **args):
487 487 # The server responds with an empty frame if the client should
488 488 # continue submitting the payload.
489 489 r = self._call(cmd, **args)
490 490 if r:
491 491 return '', r
492 492
493 493 # The payload consists of frames with content followed by an empty
494 494 # frame.
495 495 for d in iter(lambda: fp.read(4096), ''):
496 496 self._writeframed(d)
497 497 self._writeframed("", flush=True)
498 498
499 499 # In case of success, there is an empty frame and a frame containing
500 500 # the integer result (as a string).
501 501 # In case of error, there is a non-empty frame containing the error.
502 502 r = self._readframed()
503 503 if r:
504 504 return '', r
505 505 return self._readframed(), ''
506 506
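# A frame is the decimal payload length, a newline, then the payload
# (see _writeframed and _readframed below). A hypothetical successful
# push exchange, with invented sizes:
#
#   client: 4096\n<data> ... 123\n<data> 0\n   payload, empty frame ends it
#   server: 0\n                                empty frame means success
#           1\n1                               framed integer result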
507 507 def _calltwowaystream(self, cmd, fp, **args):
508 508 # The server responds with an empty frame if the client should
509 509 # continue submitting the payload.
510 510 r = self._call(cmd, **args)
511 511 if r:
512 512 # XXX needs to be made better
513 513 raise error.Abort(_('unexpected remote reply: %s') % r)
514 514
515 515 # The payload consists of frames with content followed by an empty
516 516 # frame.
517 517 for d in iter(lambda: fp.read(4096), ''):
518 518 self._writeframed(d)
519 519 self._writeframed("", flush=True)
520 520
521 521 return self._pipei
522 522
523 523 def _getamount(self):
524 524 l = self._pipei.readline()
525 525 if l == '\n':
526 526 if self._autoreadstderr:
527 527 self._readerr()
528 528 msg = _('check previous remote output')
529 529 self._abort(error.OutOfBandError(hint=msg))
530 530 if self._autoreadstderr:
531 531 self._readerr()
532 532 try:
533 533 return int(l)
534 534 except ValueError:
535 535 self._abort(error.ResponseError(_("unexpected response:"), l))
536 536
537 537 def _readframed(self):
538 538 size = self._getamount()
539 539 if not size:
540 540 return b''
541 541
542 542 return self._pipei.read(size)
543 543
544 544 def _writeframed(self, data, flush=False):
545 545 self._pipeo.write("%d\n" % len(data))
546 546 if data:
547 547 self._pipeo.write(data)
548 548 if flush:
549 549 self._pipeo.flush()
550 550 if self._autoreadstderr:
551 551 self._readerr()
552 552
553 553 class sshv2peer(sshv1peer):
554 554 """A peer that speakers version 2 of the transport protocol."""
555 555 # Currently version 2 is identical to version 1 post handshake.
556 556 # And handshake is performed before the peer is instantiated. So
557 557 # we need no custom code.
558 558
559 559 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
560 560 """Make a peer instance from existing pipes.
561 561
562 562 ``path`` and ``proc`` are stored on the eventual peer instance and may
563 563 not be used for anything meaningful.
564 564
565 565 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
566 566 SSH server's stdio handles.
567 567
568 568 This function is factored out to allow creating peers that don't
569 569 actually spawn a new process. It is useful for starting SSH protocol
570 570 servers and clients via non-standard means, which can be useful for
571 571 testing.
572 572 """
573 573 try:
574 574 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
575 575 except Exception:
576 576 _cleanuppipes(ui, stdout, stdin, stderr)
577 577 raise
578 578
579 579 if protoname == wireprototypes.SSHV1:
580 580 return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
581 581 autoreadstderr=autoreadstderr)
582 582 elif protoname == wireprototypes.SSHV2:
583 583 return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
584 584 autoreadstderr=autoreadstderr)
585 585 else:
586 586 _cleanuppipes(ui, stdout, stdin, stderr)
587 587 raise error.RepoError(_('unknown version of SSH protocol: %s') %
588 588 protoname)
589 589
590 def instance(ui, path, create):
590 def instance(ui, path, create, intents=None):
591 591 """Create an SSH peer.
592 592
593 593 The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
594 594 """
595 595 u = util.url(path, parsequery=False, parsefragment=False)
596 596 if u.scheme != 'ssh' or not u.host or u.path is None:
597 597 raise error.RepoError(_("couldn't parse location %s") % path)
598 598
599 599 util.checksafessh(path)
600 600
601 601 if u.passwd is not None:
602 602 raise error.RepoError(_('password in URL not supported'))
603 603
604 604 sshcmd = ui.config('ui', 'ssh')
605 605 remotecmd = ui.config('ui', 'remotecmd')
606 606 sshaddenv = dict(ui.configitems('sshenv'))
607 607 sshenv = procutil.shellenviron(sshaddenv)
608 608 remotepath = u.path or '.'
609 609
610 610 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
611 611
612 612 if create:
613 613 cmd = '%s %s %s' % (sshcmd, args,
614 614 procutil.shellquote('%s init %s' %
615 615 (_serverquote(remotecmd), _serverquote(remotepath))))
616 616 ui.debug('running %s\n' % cmd)
617 617 res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
618 618 if res != 0:
619 619 raise error.RepoError(_('could not create remote repo'))
620 620
621 621 proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
622 622 remotepath, sshenv)
623 623
624 624 peer = makepeer(ui, path, proc, stdin, stdout, stderr)
625 625
626 626 # Finally, if supported by the server, notify it about our own
627 627 # capabilities.
628 628 if 'protocaps' in peer.capabilities():
629 629 try:
630 630 peer._call("protocaps",
631 631 caps=' '.join(sorted(_clientcapabilities())))
632 632 except IOError:
633 633 peer._cleanup()
634 634 raise error.RepoError(_('capability exchange failed'))
635 635
636 636 return peer
@@ -1,221 +1,221 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 changelog,
17 17 error,
18 18 localrepo,
19 19 manifest,
20 20 namespaces,
21 21 pathutil,
22 22 scmutil,
23 23 store,
24 24 url,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28
29 29 urlerr = util.urlerr
30 30 urlreq = util.urlreq
31 31
32 32 class httprangereader(object):
33 33 def __init__(self, url, opener):
34 34 # we assume opener has HTTPRangeHandler
35 35 self.url = url
36 36 self.pos = 0
37 37 self.opener = opener
38 38 self.name = url
39 39
40 40 def __enter__(self):
41 41 return self
42 42
43 43 def __exit__(self, exc_type, exc_value, traceback):
44 44 self.close()
45 45
46 46 def seek(self, pos):
47 47 self.pos = pos
48 48 def read(self, bytes=None):
49 49 req = urlreq.request(self.url)
50 50 end = ''
51 51 if bytes:
52 52 end = self.pos + bytes - 1
53 53 if self.pos or end:
54 54 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
55 55
56 56 try:
57 57 f = self.opener.open(req)
58 58 data = f.read()
59 59 code = f.code
60 60 except urlerr.httperror as inst:
61 61 num = inst.code == 404 and errno.ENOENT or None
62 62 raise IOError(num, inst)
63 63 except urlerr.urlerror as inst:
64 64 raise IOError(None, inst.reason[1])
65 65
66 66 if code == 200:
67 67 # HTTPRangeHandler does nothing if remote does not support
68 68 # Range headers and returns the full entity. Let's slice it.
69 69 if bytes:
70 70 data = data[self.pos:self.pos + bytes]
71 71 else:
72 72 data = data[self.pos:]
73 73 elif bytes:
74 74 data = data[:bytes]
75 75 self.pos += len(data)
76 76 return data
77 77 def readlines(self):
78 78 return self.read().splitlines(True)
79 79 def __iter__(self):
80 80 return iter(self.readlines())
81 81 def close(self):
82 82 pass
83 83
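A quick sketch of the Range arithmetic in ``read`` above (values invented; the header asks for an inclusive byte range, hence the ``- 1``)::

    pos, nbytes = 100, 50
    header = 'bytes=%d-%s' % (pos, pos + nbytes - 1)
    assert header == 'bytes=100-149'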
84 84 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
85 85 # which was itself extracted from urlgrabber. See the last version of
86 86 # byterange.py from history if you need more information.
87 87 class _RangeError(IOError):
88 88 """Error raised when an unsatisfiable range is requested."""
89 89
90 90 class _HTTPRangeHandler(urlreq.basehandler):
91 91 """Handler that enables HTTP Range headers.
92 92
93 93 This was extremely simple. The Range header is an HTTP feature to
94 94 begin with so all this class does is tell urllib2 that the
95 95 "206 Partial Content" response from the HTTP server is what we
96 96 expected.
97 97 """
98 98
99 99 def http_error_206(self, req, fp, code, msg, hdrs):
100 100 # 206 Partial Content Response
101 101 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
102 102 r.code = code
103 103 r.msg = msg
104 104 return r
105 105
106 106 def http_error_416(self, req, fp, code, msg, hdrs):
107 107 # HTTP's Range Not Satisfiable error
108 108 raise _RangeError('Requested Range Not Satisfiable')
109 109
110 110 def build_opener(ui, authinfo):
111 111 # urllib cannot handle URLs with embedded user or passwd
112 112 urlopener = url.opener(ui, authinfo)
113 113 urlopener.add_handler(_HTTPRangeHandler())
114 114
115 115 class statichttpvfs(vfsmod.abstractvfs):
116 116 def __init__(self, base):
117 117 self.base = base
118 118
119 119 def __call__(self, path, mode='r', *args, **kw):
120 120 if mode not in ('r', 'rb'):
121 121 raise IOError('Permission denied')
122 122 f = "/".join((self.base, urlreq.quote(path)))
123 123 return httprangereader(f, urlopener)
124 124
125 125 def join(self, path):
126 126 if path:
127 127 return pathutil.join(self.base, path)
128 128 else:
129 129 return self.base
130 130
131 131 return statichttpvfs
132 132
133 133 class statichttppeer(localrepo.localpeer):
134 134 def local(self):
135 135 return None
136 136 def canpush(self):
137 137 return False
138 138
139 139 class statichttprepository(localrepo.localrepository):
140 140 supported = localrepo.localrepository._basesupported
141 141
142 142 def __init__(self, ui, path):
143 143 self._url = path
144 144 self.ui = ui
145 145
146 146 self.root = path
147 147 u = util.url(path.rstrip('/') + "/.hg")
148 148 self.path, authinfo = u.authinfo()
149 149
150 150 vfsclass = build_opener(ui, authinfo)
151 151 self.vfs = vfsclass(self.path)
152 152 self.cachevfs = vfsclass(self.vfs.join('cache'))
153 153 self._phasedefaults = []
154 154
155 155 self.names = namespaces.namespaces()
156 156 self.filtername = None
157 157
158 158 try:
159 159 requirements = scmutil.readrequires(self.vfs, self.supported)
160 160 except IOError as inst:
161 161 if inst.errno != errno.ENOENT:
162 162 raise
163 163 requirements = set()
164 164
165 165 # check if it is a non-empty old-style repository
166 166 try:
167 167 fp = self.vfs("00changelog.i")
168 168 fp.read(1)
169 169 fp.close()
170 170 except IOError as inst:
171 171 if inst.errno != errno.ENOENT:
172 172 raise
173 173 # we do not care about empty old-style repositories here
174 174 msg = _("'%s' does not appear to be an hg repository") % path
175 175 raise error.RepoError(msg)
176 176
177 177 # setup store
178 178 self.store = store.store(requirements, self.path, vfsclass)
179 179 self.spath = self.store.path
180 180 self.svfs = self.store.opener
181 181 self.sjoin = self.store.join
182 182 self._filecache = {}
183 183 self.requirements = requirements
184 184
185 185 self.manifestlog = manifest.manifestlog(self.svfs, self)
186 186 self.changelog = changelog.changelog(self.svfs)
187 187 self._tags = None
188 188 self.nodetagscache = None
189 189 self._branchcaches = {}
190 190 self._revbranchcache = None
191 191 self.encodepats = None
192 192 self.decodepats = None
193 193 self._transref = None
194 194
195 195 def _restrictcapabilities(self, caps):
196 196 caps = super(statichttprepository, self)._restrictcapabilities(caps)
197 197 return caps.difference(["pushkey"])
198 198
199 199 def url(self):
200 200 return self._url
201 201
202 202 def local(self):
203 203 return False
204 204
205 205 def peer(self):
206 206 return statichttppeer(self)
207 207
208 208 def wlock(self, wait=True):
209 209 raise error.LockUnavailable(0, _('lock not available'), 'lock',
210 210 _('cannot lock static-http repository'))
211 211
212 212 def lock(self, wait=True):
213 213 raise error.Abort(_('cannot lock static-http repository'))
214 214
215 215 def _writecaches(self):
216 216 pass # statichttprepository are read only
217 217
218 def instance(ui, path, create):
218 def instance(ui, path, create, intents=None):
219 219 if create:
220 220 raise error.Abort(_('cannot create new static-http repository'))
221 221 return statichttprepository(ui, path[7:])
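For context, this entry point backs the ``static-http://`` scheme; ``create`` is rejected because a plain web-server export is read-only. A usage sketch with a hypothetical URL::

    hg clone static-http://example.com/repo local-copy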
@@ -1,261 +1,261 b''
1 1 # unionrepo.py - repository class for viewing union of repository changesets
2 2 #
3 3 # Derived from bundlerepo.py
4 4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Repository class for "in-memory pull" of one local repository to another,
11 11 allowing operations like diff and log with revsets.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18
19 19 from . import (
20 20 changelog,
21 21 cmdutil,
22 22 error,
23 23 filelog,
24 24 localrepo,
25 25 manifest,
26 26 mdiff,
27 27 pathutil,
28 28 pycompat,
29 29 revlog,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33
34 34 class unionrevlog(revlog.revlog):
35 35 def __init__(self, opener, indexfile, revlog2, linkmapper):
36 36 # How it works:
37 37 # To retrieve a revision, we just need to know the node id so we can
38 38 # look it up in revlog2.
39 39 #
40 40 # To differentiate a rev in the second revlog from a rev in this revlog,
41 41 # we check revision against repotiprev.
42 42 opener = vfsmod.readonlyvfs(opener)
43 43 revlog.revlog.__init__(self, opener, indexfile)
44 44 self.revlog2 = revlog2
45 45
46 46 n = len(self)
47 47 self.repotiprev = n - 1
48 48 self.bundlerevs = set() # used by 'bundle()' revset expression
49 49 for rev2 in self.revlog2:
50 50 rev = self.revlog2.index[rev2]
51 51 # rev numbers here are revlog2's, very different from self's revs
52 52 _start, _csize, _rsize, base, linkrev, p1rev, p2rev, node = rev
53 53 flags = _start & 0xFFFF
54 54
55 55 if linkmapper is None: # link is to same revlog
56 56 assert linkrev == rev2 # we never link back
57 57 link = n
58 58 else: # rev must be mapped from repo2 cl to unified cl by linkmapper
59 59 link = linkmapper(linkrev)
60 60
61 61 if linkmapper is not None: # base must be mapped too
62 62 base = linkmapper(base)
63 63
64 64 if node in self.nodemap:
65 65 # this happens for the common revlog revisions
66 66 self.bundlerevs.add(self.nodemap[node])
67 67 continue
68 68
69 69 p1node = self.revlog2.node(p1rev)
70 70 p2node = self.revlog2.node(p2rev)
71 71
72 72 e = (flags, None, None, base,
73 73 link, self.rev(p1node), self.rev(p2node), node)
74 74 self.index.insert(-1, e)
75 75 self.nodemap[node] = n
76 76 self.bundlerevs.add(n)
77 77 n += 1
78 78
79 79 def _chunk(self, rev):
80 80 if rev <= self.repotiprev:
81 81 return revlog.revlog._chunk(self, rev)
82 82 return self.revlog2._chunk(self.node(rev))
83 83
84 84 def revdiff(self, rev1, rev2):
85 85 """return or calculate a delta between two revisions"""
86 86 if rev1 > self.repotiprev and rev2 > self.repotiprev:
87 87 return self.revlog2.revdiff(
88 88 self.revlog2.rev(self.node(rev1)),
89 89 self.revlog2.rev(self.node(rev2)))
90 90 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
91 91 return self.baserevdiff(rev1, rev2)
92 92
93 93 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
94 94
95 95 def revision(self, nodeorrev, _df=None, raw=False):
96 96 """return an uncompressed revision of a given node or revision
97 97 number.
98 98 """
99 99 if isinstance(nodeorrev, int):
100 100 rev = nodeorrev
101 101 node = self.node(rev)
102 102 else:
103 103 node = nodeorrev
104 104 rev = self.rev(node)
105 105
106 106 if node == nullid:
107 107 return ""
108 108
109 109 if rev > self.repotiprev:
110 110 text = self.revlog2.revision(node)
111 111 self._cache = (node, rev, text)
112 112 else:
113 113 text = self.baserevision(rev)
114 114 # already cached
115 115 return text
116 116
117 117 def baserevision(self, nodeorrev):
118 118 # Revlog subclasses may override 'revision' method to modify format of
119 119 # content retrieved from revlog. To use unionrevlog with such a class one
120 120 # needs to override 'baserevision' and make more specific call here.
121 121 return revlog.revlog.revision(self, nodeorrev)
122 122
123 123 def baserevdiff(self, rev1, rev2):
124 124 # Exists for the same purpose as baserevision.
125 125 return revlog.revlog.revdiff(self, rev1, rev2)
126 126
127 127 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
128 128 raise NotImplementedError
129 129 def addgroup(self, deltas, transaction, addrevisioncb=None):
130 130 raise NotImplementedError
131 131 def strip(self, rev, minlink):
132 132 raise NotImplementedError
133 133 def checksize(self):
134 134 raise NotImplementedError
135 135
136 136 class unionchangelog(unionrevlog, changelog.changelog):
137 137 def __init__(self, opener, opener2):
138 138 changelog.changelog.__init__(self, opener)
139 139 linkmapper = None
140 140 changelog2 = changelog.changelog(opener2)
141 141 unionrevlog.__init__(self, opener, self.indexfile, changelog2,
142 142 linkmapper)
143 143
144 144 def baserevision(self, nodeorrev):
145 145 # Although changelog doesn't override 'revision' method, some extensions
146 146 # may replace this class with another that does. Same story with
147 147 # manifest and filelog classes.
148 148 return changelog.changelog.revision(self, nodeorrev)
149 149
150 150 def baserevdiff(self, rev1, rev2):
151 151 return changelog.changelog.revdiff(self, rev1, rev2)
152 152
153 153 class unionmanifest(unionrevlog, manifest.manifestrevlog):
154 154 def __init__(self, opener, opener2, linkmapper):
155 155 manifest.manifestrevlog.__init__(self, opener)
156 156 manifest2 = manifest.manifestrevlog(opener2)
157 157 unionrevlog.__init__(self, opener, self.indexfile, manifest2,
158 158 linkmapper)
159 159
160 160 def baserevision(self, nodeorrev):
161 161 return manifest.manifestrevlog.revision(self, nodeorrev)
162 162
163 163 def baserevdiff(self, rev1, rev2):
164 164 return manifest.manifestrevlog.revdiff(self, rev1, rev2)
165 165
166 166 class unionfilelog(filelog.filelog):
167 167 def __init__(self, opener, path, opener2, linkmapper, repo):
168 168 filelog.filelog.__init__(self, opener, path)
169 169 filelog2 = filelog.filelog(opener2, path)
170 170 self._revlog = unionrevlog(opener, self.indexfile,
171 171 filelog2._revlog, linkmapper)
172 172 self._repo = repo
173 173 self.repotiprev = self._revlog.repotiprev
174 174 self.revlog2 = self._revlog.revlog2
175 175
176 176 def baserevision(self, nodeorrev):
177 177 return filelog.filelog.revision(self, nodeorrev)
178 178
179 179 def baserevdiff(self, rev1, rev2):
180 180 return filelog.filelog.revdiff(self, rev1, rev2)
181 181
182 182 def iscensored(self, rev):
183 183 """Check if a revision is censored."""
184 184 if rev <= self.repotiprev:
185 185 return filelog.filelog.iscensored(self, rev)
186 186 node = self.node(rev)
187 187 return self.revlog2.iscensored(self.revlog2.rev(node))
188 188
189 189 class unionpeer(localrepo.localpeer):
190 190 def canpush(self):
191 191 return False
192 192
193 193 class unionrepository(localrepo.localrepository):
194 194 def __init__(self, ui, path, path2):
195 195 localrepo.localrepository.__init__(self, ui, path)
196 196 self.ui.setconfig('phases', 'publish', False, 'unionrepo')
197 197
198 198 self._url = 'union:%s+%s' % (util.expandpath(path),
199 199 util.expandpath(path2))
200 200 self.repo2 = localrepo.localrepository(ui, path2)
201 201
202 202 @localrepo.unfilteredpropertycache
203 203 def changelog(self):
204 204 return unionchangelog(self.svfs, self.repo2.svfs)
205 205
206 206 def _clrev(self, rev2):
207 207 """map from repo2 changelog rev to temporary rev in self.changelog"""
208 208 node = self.repo2.changelog.node(rev2)
209 209 return self.changelog.rev(node)
210 210
211 211 def _constructmanifest(self):
212 212 return unionmanifest(self.svfs, self.repo2.svfs,
213 213 self.unfiltered()._clrev)
214 214
215 215 def url(self):
216 216 return self._url
217 217
218 218 def file(self, f):
219 219 return unionfilelog(self.svfs, f, self.repo2.svfs,
220 220 self.unfiltered()._clrev, self)
221 221
222 222 def close(self):
223 223 self.repo2.close()
224 224
225 225 def cancopy(self):
226 226 return False
227 227
228 228 def peer(self):
229 229 return unionpeer(self)
230 230
231 231 def getcwd(self):
232 232 return pycompat.getcwd() # always outside the repo
233 233
234 def instance(ui, path, create):
234 def instance(ui, path, create, intents=None):
235 235 if create:
236 236 raise error.Abort(_('cannot create new union repository'))
237 237 parentpath = ui.config("bundle", "mainreporoot")
238 238 if not parentpath:
239 239 # try to find the correct path to the working directory repo
240 240 parentpath = cmdutil.findrepo(pycompat.getcwd())
241 241 if parentpath is None:
242 242 parentpath = ''
243 243 if parentpath:
244 244 # Try to make the full path relative so we get a nice, short URL.
245 245 # In particular, we don't want temp dir names in test outputs.
246 246 cwd = pycompat.getcwd()
247 247 if parentpath == cwd:
248 248 parentpath = ''
249 249 else:
250 250 cwd = pathutil.normasprefix(cwd)
251 251 if parentpath.startswith(cwd):
252 252 parentpath = parentpath[len(cwd):]
253 253 if path.startswith('union:'):
254 254 s = path.split(":", 1)[1].split("+", 1)
255 255 if len(s) == 1:
256 256 repopath, repopath2 = parentpath, s[0]
257 257 else:
258 258 repopath, repopath2 = s
259 259 else:
260 260 repopath, repopath2 = parentpath, path
261 261 return unionrepository(ui, repopath, repopath2)
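For context, this entry point backs the ``union:`` scheme; when only one path follows ``union:``, the enclosing working-directory repository found above supplies the first member. A usage sketch with invented paths::

    hg log -R union:../repo1+../repo2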