##// END OF EJS Templates
peer: pass the `path` object to `make_peer`...
marmoute -
r50652:5f71fff8 default
parent child Browse files
Show More
@@ -1,176 +1,175 b''
1 1 # Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """extend schemes with shortcuts to repository swarms
7 7
8 8 This extension allows you to specify shortcuts for parent URLs with a
9 9 lot of repositories to act like a scheme, for example::
10 10
11 11 [schemes]
12 12 py = http://code.python.org/hg/
13 13
14 14 After that you can use it like::
15 15
16 16 hg clone py://trunk/
17 17
18 18 Additionally there is support for some more complex schemas, for
19 19 example used by Google Code::
20 20
21 21 [schemes]
22 22 gcode = http://{1}.googlecode.com/hg/
23 23
24 24 The syntax is taken from Mercurial templates, and you have unlimited
25 25 number of variables, starting with ``{1}`` and continuing with
26 26 ``{2}``, ``{3}`` and so on. These variables will receive parts of the
27 27 URL supplied, split by ``/``. Anything not specified as ``{part}`` will be
28 28 just appended to an URL.
29 29
30 30 For convenience, the extension adds these schemes by default::
31 31
32 32 [schemes]
33 33 py = http://hg.python.org/
34 34 bb = https://bitbucket.org/
35 35 bb+ssh = ssh://hg@bitbucket.org/
36 36 gcode = https://{1}.googlecode.com/hg/
37 37 kiln = https://{1}.kilnhg.com/Repo/
38 38
39 39 You can override a predefined scheme by defining a new scheme with the
40 40 same name.
41 41 """
42 42
43 43 import os
44 44 import re
45 45
46 46 from mercurial.i18n import _
47 47 from mercurial import (
48 48 error,
49 49 extensions,
50 50 hg,
51 51 pycompat,
52 52 registrar,
53 53 templater,
54 54 )
55 55 from mercurial.utils import (
56 56 urlutil,
57 57 )
58 58
59 59 cmdtable = {}
60 60 command = registrar.command(cmdtable)
61 61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 63 # be specifying the version(s) of Mercurial they are tested with, or
64 64 # leave the attribute unspecified.
65 65 testedwith = b'ships-with-hg-core'
66 66
67 67 _partre = re.compile(br'{(\d+)\}')
68 68
69 69
class ShortRepository:
    """A pseudo repository/peer factory that expands scheme shortcuts.

    Instances are registered under the shortcut name in
    ``hg.peer_schemes`` / ``hg.repo_schemes`` by ``extsetup()``.  When
    Mercurial asks for a peer or repository, the shortcut URL is expanded
    via ``resolve()`` and the request is delegated to the real handler of
    the expanded URL's scheme.
    """

    def __init__(self, url, scheme, templater):
        self.scheme = scheme
        self.templater = templater
        self.url = url
        try:
            # highest {N} placeholder used by the template; controls how
            # many leading path components are consumed on expansion
            self.parts = max(map(int, _partre.findall(self.url)))
        except ValueError:
            # no {N} placeholders at all
            self.parts = 0

    def __repr__(self):
        return b'<ShortRepository: %s>' % self.scheme

    def make_peer(self, ui, path, *args, **kwargs):
        """Expand ``path`` (a urlutil.path object) and build a peer.

        The full path object (not just its location string) is forwarded
        to the real scheme handler so associated metadata is preserved.
        Returns None when the expanded scheme has no peer handler.
        """
        new_url = self.resolve(path.rawloc)
        path = path.copy(new_raw_location=new_url)
        cls = hg.peer_schemes.get(path.url.scheme)
        if cls is not None:
            return cls.make_peer(ui, path, *args, **kwargs)
        return None

    def instance(self, ui, url, create, intents=None, createopts=None):
        """Expand ``url`` and instantiate the matching repo/peer class."""
        url = self.resolve(url)
        u = urlutil.url(url)
        scheme = u.scheme or b'file'
        if scheme in hg.peer_schemes:
            cls = hg.peer_schemes[scheme]
        elif scheme in hg.repo_schemes:
            cls = hg.repo_schemes[scheme]
        else:
            cls = hg.LocalFactory
        return cls.instance(
            ui, url, create, intents=intents, createopts=createopts
        )

    def resolve(self, url):
        """Expand a shortcut URL into the full target URL.

        Raises error.Abort when ``url`` has no ``://`` separator.
        """
        # Should this use the urlutil.url class, or is manual parsing better?
        try:
            url = url.split(b'://', 1)[1]
        except IndexError:
            raise error.Abort(_(b"no '://' in scheme url '%s'") % url)
        # the first self.parts path components feed the {N} placeholders;
        # whatever remains is appended verbatim after template expansion
        parts = url.split(b'/', self.parts)
        if len(parts) > self.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = b''
        context = {b'%d' % (i + 1): v for i, v in enumerate(parts)}
        return b''.join(self.templater.process(self.url, context)) + tail
120 119
121 120
def hasdriveletter(orig, path):
    """Wrapper for urlutil.hasdriveletter.

    A path starting with a registered scheme shortcut (e.g. ``py:``)
    must not be mistaken for a Windows drive letter, so answer False
    for those and defer to the original implementation otherwise.
    """
    if path:
        prefixes = [name + b':' for name in schemes]
        if any(path.startswith(prefix) for prefix in prefixes):
            return False
    return orig(path)
128 127
129 128
# Default scheme shortcuts shipped with the extension.  Entries from the
# user's [schemes] config section are merged into this dict by extsetup()
# and may override these defaults.
schemes = {
    b'py': b'http://hg.python.org/',
    b'bb': b'https://bitbucket.org/',
    b'bb+ssh': b'ssh://hg@bitbucket.org/',
    b'gcode': b'https://{1}.googlecode.com/hg/',
    b'kiln': b'https://{1}.kilnhg.com/Repo/',
}
137 136
138 137
def _check_drive_letter(scheme):
    """check if a scheme conflict with a Windows drive letter

    Raises error.Abort when ``scheme`` is a single letter that matches an
    existing drive on a Windows system (e.g. a ``c`` scheme vs ``C:\\``).
    """
    looks_like_drive = (
        pycompat.iswindows and len(scheme) == 1 and scheme.isalpha()
    )
    if looks_like_drive and os.path.exists(b'%s:\\' % scheme):
        msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
        msg %= (scheme, scheme.upper())
        raise error.Abort(msg)
150 149
151 150
def extsetup(ui):
    """Register all configured scheme shortcuts with Mercurial.

    Merges the user's [schemes] config into the defaults, then installs a
    ShortRepository handler for each shortcut into ``hg.peer_schemes`` or
    ``hg.repo_schemes`` depending on the target URL's scheme, and wraps
    urlutil.hasdriveletter so shortcuts are not parsed as drive letters.
    """
    schemes.update(dict(ui.configitems(b'schemes')))
    t = templater.engine(templater.parse)
    for scheme, url in schemes.items():
        # BUG FIX: check each individual scheme name, not the whole
        # ``schemes`` dict -- passing the dict made the drive-letter
        # conflict check a silent no-op (len(dict) is never 1 here).
        _check_drive_letter(scheme)
        url_scheme = urlutil.url(url).scheme
        if url_scheme in hg.peer_schemes:
            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
        else:
            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
164 163
165 164
@command(b'debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path"""
    scheme = urlutil.url(url).scheme
    if scheme in hg.peer_schemes:
        handler = hg.peer_schemes[scheme]
    else:
        handler = hg.repo_schemes.get(scheme)
    # only ShortRepository handlers know how to expand a shortcut;
    # built-in scheme modules are left alone and the URL printed as-is
    if isinstance(handler, ShortRepository):
        url = handler.resolve(url)
    ui.write(url + b'\n')
@@ -1,1668 +1,1668 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import os
11 11 import posixpath
12 12 import shutil
13 13 import stat
14 14 import weakref
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 sha1nodeconstants,
20 20 short,
21 21 )
22 22 from .pycompat import getattr
23 23
24 24 from . import (
25 25 bookmarks,
26 26 bundlerepo,
27 27 cmdutil,
28 28 destutil,
29 29 discovery,
30 30 error,
31 31 exchange,
32 32 extensions,
33 33 graphmod,
34 34 httppeer,
35 35 localrepo,
36 36 lock,
37 37 logcmdutil,
38 38 logexchange,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 narrowspec,
42 42 phases,
43 43 requirements,
44 44 scmutil,
45 45 sshpeer,
46 46 statichttprepo,
47 47 ui as uimod,
48 48 unionrepo,
49 49 url,
50 50 util,
51 51 verify as verifymod,
52 52 vfs as vfsmod,
53 53 )
54 54 from .interfaces import repository as repositorymod
55 55 from .utils import (
56 56 hashutil,
57 57 stringutil,
58 58 urlutil,
59 59 )
60 60
61 61
62 62 release = lock.release
63 63
64 64 # shared features
65 65 sharedbookmarks = b'bookmarks'
66 66
67 67
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve branch names against a peer's branchmap.

    ``branches`` is a ``(hashbranch, branches)`` pair as produced by URL
    parsing.  Returns ``(revs, checkout)`` where ``revs`` is the
    (possibly extended) revision list and ``checkout`` is the revision to
    update the working copy to, or None.

    Raises error.Abort when branch lookup is needed but unsupported, and
    error.RepoLookupError for an unknown branch name.
    """
    if util.safehasattr(other, 'peer'):
        # a courtesy to callers using a localrepo for other
        peer = other.peer()
    else:
        peer = other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # nothing branch-related to resolve; pass revs through
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable(b'branchmap'):
        # without branchmap support, hashbranch is treated as a plain
        # revision symbol and resolved later by the remote
        if branches:
            raise error.Abort(_(b"remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand(b'branchmap', {}).result()

    def primary(branch):
        # append every head of ``branch`` (newest first) to revs;
        # returns False when the branch is unknown to the peer
        if branch == b'.':
            if not lrepo:
                raise error.Abort(_(b"dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
    if hashbranch:
        # hashbranch may be a branch name or a bare revision symbol;
        # fall back to passing it through when it is not a branch
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
114 114
115 115
116 116 def _isfile(path):
117 117 try:
118 118 # we use os.stat() directly here instead of os.path.isfile()
119 119 # because the latter started returning `False` on invalid path
120 120 # exceptions starting in 3.8 and we care about handling
121 121 # invalid paths specially here.
122 122 st = os.stat(path)
123 123 except ValueError as e:
124 124 msg = stringutil.forcebytestr(e)
125 125 raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
126 126 except OSError:
127 127 return False
128 128 else:
129 129 return stat.S_ISREG(st.st_mode)
130 130
131 131
class LocalFactory:
    """thin wrapper to dispatch between localrepo and bundle repo"""

    @staticmethod
    def islocal(path: bytes) -> bool:
        # a plain file at the path is a bundle, which does not count as
        # a local repository; anything else (typically a directory) does
        path = util.expandpath(urlutil.urllocalpath(path))
        return not _isfile(path)

    @staticmethod
    def instance(ui, path, *args, **kwargs):
        # dispatch on the on-disk shape: a regular file is opened as a
        # bundle repository, anything else as a local repository
        path = util.expandpath(urlutil.urllocalpath(path))
        if _isfile(path):
            cls = bundlerepo
        else:
            cls = localrepo
        return cls.instance(ui, path, *args, **kwargs)
148 148
149 149
# factories keyed by URL scheme that produce local repository objects
repo_schemes = {
    b'bundle': bundlerepo,
    b'union': unionrepo,
    b'file': LocalFactory,
}

# factories keyed by URL scheme that produce remote peer objects
peer_schemes = {
    b'http': httppeer,
    b'https': httppeer,
    b'ssh': sshpeer,
    b'static-http': statichttprepo,
}
162 162
163 163
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        # bytes input is a path/URL: decide from its scheme
        u = urlutil.url(repo)
        scheme = u.scheme or b'file'
        if scheme in peer_schemes:
            cls = peer_schemes[scheme]
            cls.make_peer  # make sure we load the module
        elif scheme in repo_schemes:
            cls = repo_schemes[scheme]
            cls.instance  # make sure we load the module
        else:
            cls = LocalFactory
        if util.safehasattr(cls, 'islocal'):
            return cls.islocal(repo)  # pytype: disable=module-attr
        # handlers without an islocal() are treated as remote
        return False
    # passing a repo object here is deprecated; use obj.local() instead
    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
    return repo.local()
182 182
183 183
def openpath(ui, path, sendaccept=True):
    '''open path with open if local, url.open if remote'''
    pathurl = urlutil.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        # remote: delegate to the url module's opener
        return url.open(ui, path, sendaccept=sendaccept)
    # local: plain binary file handle
    return util.posixfile(pathurl.localpath(), b'rb')
191 191
192 192
# a list of (ui, repo) functions called for wire peer initialization
# (run from _setup_repo_or_peer() when the object is not local)
wirepeersetupfuncs = []
195 195
196 196
def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
    """Run setup hooks on a freshly created repository or peer.

    Executes ``presetupfuncs`` first, then each extension's ``reposetup``
    hook (timing each one), and finally the wire-peer setup functions when
    ``obj`` is not local.
    """
    # prefer the object's own ui when it has one
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    ui.log(b'extension', b'- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            ui.log(b'extension', b'  - running reposetup for %s\n', name)
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                msg = b'  > reposetup for %s took %s\n'
                ui.log(b'extension', msg, name, stats)
    ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
215 215
216 216
def repository(
    ui,
    path=b'',
    create=False,
    presetupfuncs=None,
    intents=None,
    createopts=None,
):
    """return a repository object for the specified path"""
    scheme = urlutil.url(path).scheme
    if scheme is None:
        # scheme-less paths are plain filesystem paths
        scheme = b'file'
    cls = repo_schemes.get(scheme)
    if cls is None:
        # a peer-only scheme cannot produce a local repository object
        if scheme in peer_schemes:
            raise error.Abort(_(b"repository '%s' is not local") % path)
        cls = LocalFactory
    repo = cls.instance(
        ui,
        path,
        create,
        intents=intents,
        createopts=createopts,
    )
    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
    # callers always get the "visible" filtered view
    return repo.filtered(b'visible')
243 243
244 244
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path

    ``path`` may be a raw location (bytes) or an existing urlutil.path
    object; it is normalized to a path object either way.  When the
    scheme has a registered peer factory the full path object is passed
    to ``make_peer`` (so associated metadata travels with the peer);
    otherwise a local repository is opened and its peer returned.
    '''
    ui = getattr(uiorrepo, 'ui', uiorrepo)
    rui = remoteui(uiorrepo, opts)
    if util.safehasattr(path, 'url'):
        # this is already a urlutil.path object
        peer_path = path
    else:
        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
    if scheme in peer_schemes:
        cls = peer_schemes[scheme]
        peer = cls.make_peer(
            rui,
            peer_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        _setup_repo_or_peer(rui, peer)
    else:
        # this is a repository
        repo_path = peer_path.loc  # pytype: disable=attribute-error
        if not repo_path:
            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
        repo = repository(
            rui,
            repo_path,
            create,
            intents=intents,
            createopts=createopts,
        )
        peer = repo.peer(path=peer_path)
    return peer
279 279
280 280
def defaultdest(source):
    """return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    """
    # basename of the normalized URL path; empty when the URL has no
    # usable path component (bare host, root path, or empty source)
    path = urlutil.url(source).path
    if not path:
        return b''
    return os.path.basename(os.path.normpath(path))
301 301
302 302
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # not a share: the store lives inside the repo itself
        return None

    if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
        # cached from a previous call
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = urlutil.parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
320 320
321 321
def share(
    ui,
    source,
    dest=None,
    update=True,
    bookmarks=True,
    defaultpath=None,
    relative=False,
):
    '''create a shared repository

    ``source`` is a local repository object or a path/URL (bytes).
    ``dest`` defaults to a name derived from the source.  ``update``
    may be True (update to the default revision) or a revision to
    check out.  Returns the new shared repository object.
    '''

    not_local_msg = _(b'can only share local repositories')
    if util.safehasattr(source, 'local'):
        if source.local() is None:
            raise error.Abort(not_local_msg)
    elif not islocal(source):
        # XXX why are we getting bytes here ?
        raise error.Abort(not_local_msg)

    if not dest:
        dest = defaultdest(source)
    else:
        dest = urlutil.get_clone_path_obj(ui, dest).loc

    if isinstance(source, bytes):
        source_path = urlutil.get_clone_path_obj(ui, source)
        srcrepo = repository(ui, source_path.loc)
        branches = (source_path.branch, [])
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(
        ui,
        dest,
        create=True,
        createopts={
            b'sharedrepo': srcrepo,
            b'sharedrelative': relative,
            b'shareditems': shareditems,
        },
    )

    postshare(srcrepo, r, defaultpath=defaultpath)
    # reopen so the repo object reflects the post-share configuration
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
374 374
375 375
def _prependsourcehgrc(repo):
    """copies the source repo config and prepend it in current repo .hg/hgrc
    on unshare. This is only done if the share was perfomed using share safe
    method where we share config of source in shares"""
    srcvfs = vfsmod.vfs(repo.sharedpath)
    dstvfs = vfsmod.vfs(repo.path)

    if not srcvfs.exists(b'hgrc'):
        # nothing to copy from the share source
        return

    currentconfig = b''
    if dstvfs.exists(b'hgrc'):
        currentconfig = dstvfs.read(b'hgrc')

    # source config first, then the repo's own config so local settings
    # keep overriding the inherited ones
    with dstvfs(b'hgrc', b'wb') as fp:
        sourceconfig = srcvfs.read(b'hgrc')
        fp.write(b"# Config copied from shared source\n")
        fp.write(sourceconfig)
        fp.write(b'\n')
        fp.write(currentconfig)
396 396
397 397
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    with repo.lock():
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)
        with destlock or util.nullcontextmanager():
            if requirements.SHARESAFE_REQUIREMENT in repo.requirements:
                # we were sharing .hg/hgrc of the share source with the current
                # repo. We need to copy that while unsharing otherwise it can
                # disable hooks and other checks
                _prependsourcehgrc(repo)

            # keep the old pointer around as a backup rather than deleting it
            sharefile = repo.vfs.join(b'sharedpath')
            util.rename(sharefile, sharefile + b'.old')

            repo.requirements.discard(requirements.SHARED_REQUIREMENT)
            repo.requirements.discard(requirements.RELATIVE_SHARED_REQUIREMENT)
            scmutil.writereporequirements(repo)

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo[b'.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo
444 444
445 445
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # record the source as the default path of the new share
    default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
    if default:
        template = b'[paths]\ndefault = %s\n'
        destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
    if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
        # narrow clones also share the narrowspec with the working copy
        with destrepo.wlock():
            narrowspec.copytoworkingcopy(destrepo)
462 462
463 463
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_(b"updating working directory\n"))
    if update is not True:
        # an explicit revision was requested
        checkout = update
    # fall back through default and tip until a revision resolves
    for test in (checkout, b'default', b'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
484 484
485 485
def copystore(ui, srcrepo, destpath):
    """copy files from store of srcrepo in destpath

    returns destlock
    """
    destlock = None
    try:
        # hardlink starts as None and is set by util.copyfiles() on the
        # first copy; NOTE(review): the topic is computed before that, so
        # it always reads b'copying' here -- confirm whether intentional
        hardlink = None
        topic = _(b'linking') if hardlink else _(b'copying')
        with ui.makeprogress(topic, unit=_(b'files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith(b'phaseroots'):
                    # publishing repos do not carry phase data over
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith(b'data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, b"lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(
                        srcvfs.join(f), dstvfs.join(f), hardlink, progress
                    )
                    num += n
            if hardlink:
                ui.debug(b"linked %d files\n" % num)
            else:
                ui.debug(b"copied %d files\n" % num)
        return destlock
    except:  # re-raises
        release(destlock)
        raise
524 524
525 525
def clonewithshare(
    ui,
    peeropts,
    sharepath,
    source,
    srcpeer,
    dest,
    pull=False,
    rev=None,
    update=True,
    stream=False,
):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable(b'lookup'):
            raise error.Abort(
                _(
                    b"src repository does not support "
                    b"revision lookup and so doesn't "
                    b"support clone by revision"
                )
            )

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(
                    e.callcommand(
                        b'lookup',
                        {
                            b'key': r,
                        },
                    ).result()
                )
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except FileExistsError:
        pass

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, b'%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(
                _(b'(sharing from existing pooled repository %s)\n') % basename
            )
        else:
            ui.status(
                _(b'(sharing from new pooled repository %s)\n') % basename
            )
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(
                ui,
                peeropts,
                source,
                dest=sharepath,
                pull=True,
                revs=rev,
                update=False,
                stream=stream,
            )

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = util.abspath(urlutil.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(
        ui,
        sharerepo,
        dest=dest,
        update=False,
        bookmarks=False,
        defaultpath=defaultpath,
    )

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
631 631
632 632
# Recomputing caches is often slow on big repos, so copy them.
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    src = srcrepo.cachevfs.join(fname)
    if not os.path.exists(src):
        # nothing cached in the source; leave the destination untouched
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(src, os.path.join(dstcachedir, fname))
642 642
643 643
644 644 def clone(
645 645 ui,
646 646 peeropts,
647 647 source,
648 648 dest=None,
649 649 pull=False,
650 650 revs=None,
651 651 update=True,
652 652 stream=False,
653 653 branch=None,
654 654 shareopts=None,
655 655 storeincludepats=None,
656 656 storeexcludepats=None,
657 657 depth=None,
658 658 ):
659 659 """Make a copy of an existing repository.
660 660
661 661 Create a copy of an existing repository in a new directory. The
662 662 source and destination are URLs, as passed to the repository
663 663 function. Returns a pair of repository peers, the source and
664 664 newly created destination.
665 665
666 666 The location of the source is added to the new repository's
667 667 .hg/hgrc file, as the default to be used for future pulls and
668 668 pushes.
669 669
670 670 If an exception is raised, the partly cloned/updated destination
671 671 repository will be deleted.
672 672
673 673 Arguments:
674 674
675 675 source: repository object or URL
676 676
677 677 dest: URL of destination repository to create (defaults to base
678 678 name of source repository)
679 679
680 680 pull: always pull from source repository, even in local case or if the
681 681 server prefers streaming
682 682
683 683 stream: stream raw data uncompressed from repository (fast over
684 684 LAN, slow over WAN)
685 685
686 686 revs: revision to clone up to (implies pull=True)
687 687
688 688 update: update working directory after clone completes, if
689 689 destination is local repository (True means update to default rev,
690 690 anything else is treated as a revision)
691 691
692 692 branch: branches to clone
693 693
694 694 shareopts: dict of options to control auto sharing behavior. The "pool" key
695 695 activates auto sharing mode and defines the directory for stores. The
696 696 "mode" key determines how to construct the directory name of the shared
697 697 repository. "identity" means the name is derived from the node of the first
698 698 changeset in the repository. "remote" means the name is derived from the
699 699 remote's path/URL. Defaults to "identity."
700 700
701 701 storeincludepats and storeexcludepats: sets of file patterns to include and
702 702 exclude in the repository copy, respectively. If not defined, all files
703 703 will be included (a "full" clone). Otherwise a "narrow" clone containing
704 704 only the requested files will be performed. If ``storeincludepats`` is not
705 705 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
706 706 ``path:.``. If both are empty sets, no files will be cloned.
707 707 """
708 708
709 709 if isinstance(source, bytes):
710 710 src_path = urlutil.get_clone_path_obj(ui, source)
711 711 if src_path is None:
712 712 srcpeer = peer(ui, peeropts, b'')
713 713 origsource = source = b''
714 714 branches = (None, branch or [])
715 715 else:
716 716 srcpeer = peer(ui, peeropts, src_path)
717 717 origsource = src_path.rawloc
718 718 branches = (src_path.branch, branch or [])
719 719 source = src_path.loc
720 720 else:
721 721 if util.safehasattr(source, 'peer'):
722 722 srcpeer = source.peer() # in case we were called with a localrepo
723 723 else:
724 724 srcpeer = source
725 725 branches = (None, branch or [])
726 726 # XXX path: simply use the peer `path` object when this become available
727 727 origsource = source = srcpeer.url()
728 728 srclock = destlock = destwlock = cleandir = None
729 729 destpeer = None
730 730 try:
731 731 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
732 732
733 733 if dest is None:
734 734 dest = defaultdest(source)
735 735 if dest:
736 736 ui.status(_(b"destination directory: %s\n") % dest)
737 737 else:
738 738 dest_path = urlutil.get_clone_path_obj(ui, dest)
739 739 if dest_path is not None:
740 740 dest = dest_path.rawloc
741 741 else:
742 742 dest = b''
743 743
744 744 dest = urlutil.urllocalpath(dest)
745 745 source = urlutil.urllocalpath(source)
746 746
747 747 if not dest:
748 748 raise error.InputError(_(b"empty destination path is not valid"))
749 749
750 750 destvfs = vfsmod.vfs(dest, expandpath=True)
751 751 if destvfs.lexists():
752 752 if not destvfs.isdir():
753 753 raise error.InputError(
754 754 _(b"destination '%s' already exists") % dest
755 755 )
756 756 elif destvfs.listdir():
757 757 raise error.InputError(
758 758 _(b"destination '%s' is not empty") % dest
759 759 )
760 760
761 761 createopts = {}
762 762 narrow = False
763 763
764 764 if storeincludepats is not None:
765 765 narrowspec.validatepatterns(storeincludepats)
766 766 narrow = True
767 767
768 768 if storeexcludepats is not None:
769 769 narrowspec.validatepatterns(storeexcludepats)
770 770 narrow = True
771 771
772 772 if narrow:
773 773 # Include everything by default if only exclusion patterns defined.
774 774 if storeexcludepats and not storeincludepats:
775 775 storeincludepats = {b'path:.'}
776 776
777 777 createopts[b'narrowfiles'] = True
778 778
779 779 if depth:
780 780 createopts[b'shallowfilestore'] = True
781 781
782 782 if srcpeer.capable(b'lfs-serve'):
783 783 # Repository creation honors the config if it disabled the extension, so
784 784 # we can't just announce that lfs will be enabled. This check avoids
785 785 # saying that lfs will be enabled, and then saying it's an unknown
786 786 # feature. The lfs creation option is set in either case so that a
787 787 # requirement is added. If the extension is explicitly disabled but the
788 788 # requirement is set, the clone aborts early, before transferring any
789 789 # data.
790 790 createopts[b'lfs'] = True
791 791
792 792 if extensions.disabled_help(b'lfs'):
793 793 ui.status(
794 794 _(
795 795 b'(remote is using large file support (lfs), but it is '
796 796 b'explicitly disabled in the local configuration)\n'
797 797 )
798 798 )
799 799 else:
800 800 ui.status(
801 801 _(
802 802 b'(remote is using large file support (lfs); lfs will '
803 803 b'be enabled for this repository)\n'
804 804 )
805 805 )
806 806
807 807 shareopts = shareopts or {}
808 808 sharepool = shareopts.get(b'pool')
809 809 sharenamemode = shareopts.get(b'mode')
810 810 if sharepool and islocal(dest):
811 811 sharepath = None
812 812 if sharenamemode == b'identity':
813 813 # Resolve the name from the initial changeset in the remote
814 814 # repository. This returns nullid when the remote is empty. It
815 815 # raises RepoLookupError if revision 0 is filtered or otherwise
816 816 # not available. If we fail to resolve, sharing is not enabled.
817 817 try:
818 818 with srcpeer.commandexecutor() as e:
819 819 rootnode = e.callcommand(
820 820 b'lookup',
821 821 {
822 822 b'key': b'0',
823 823 },
824 824 ).result()
825 825
826 826 if rootnode != sha1nodeconstants.nullid:
827 827 sharepath = os.path.join(sharepool, hex(rootnode))
828 828 else:
829 829 ui.status(
830 830 _(
831 831 b'(not using pooled storage: '
832 832 b'remote appears to be empty)\n'
833 833 )
834 834 )
835 835 except error.RepoLookupError:
836 836 ui.status(
837 837 _(
838 838 b'(not using pooled storage: '
839 839 b'unable to resolve identity of remote)\n'
840 840 )
841 841 )
842 842 elif sharenamemode == b'remote':
843 843 sharepath = os.path.join(
844 844 sharepool, hex(hashutil.sha1(source).digest())
845 845 )
846 846 else:
847 847 raise error.Abort(
848 848 _(b'unknown share naming mode: %s') % sharenamemode
849 849 )
850 850
851 851 # TODO this is a somewhat arbitrary restriction.
852 852 if narrow:
853 853 ui.status(
854 854 _(b'(pooled storage not supported for narrow clones)\n')
855 855 )
856 856 sharepath = None
857 857
858 858 if sharepath:
859 859 return clonewithshare(
860 860 ui,
861 861 peeropts,
862 862 sharepath,
863 863 source,
864 864 srcpeer,
865 865 dest,
866 866 pull=pull,
867 867 rev=revs,
868 868 update=update,
869 869 stream=stream,
870 870 )
871 871
872 872 srcrepo = srcpeer.local()
873 873
874 874 abspath = origsource
875 875 if islocal(origsource):
876 876 abspath = util.abspath(urlutil.urllocalpath(origsource))
877 877
878 878 if islocal(dest):
879 879 if os.path.exists(dest):
880 880 # only clean up directories we create ourselves
881 881 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
882 882 cleandir = hgdir
883 883 else:
884 884 cleandir = dest
885 885
886 886 copy = False
887 887 if (
888 888 srcrepo
889 889 and srcrepo.cancopy()
890 890 and islocal(dest)
891 891 and not phases.hassecret(srcrepo)
892 892 ):
893 893 copy = not pull and not revs
894 894
895 895 # TODO this is a somewhat arbitrary restriction.
896 896 if narrow:
897 897 copy = False
898 898
899 899 if copy:
900 900 try:
901 901 # we use a lock here because if we race with commit, we
902 902 # can end up with extra data in the cloned revlogs that's
903 903 # not pointed to by changesets, thus causing verify to
904 904 # fail
905 905 srclock = srcrepo.lock(wait=False)
906 906 except error.LockError:
907 907 copy = False
908 908
909 909 if copy:
910 910 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
911 911
912 912 destrootpath = urlutil.urllocalpath(dest)
913 913 dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
914 914 localrepo.createrepository(
915 915 ui,
916 916 destrootpath,
917 917 requirements=dest_reqs,
918 918 )
919 919 destrepo = localrepo.makelocalrepository(ui, destrootpath)
920 920
921 921 destwlock = destrepo.wlock()
922 922 destlock = destrepo.lock()
923 923 from . import streamclone # avoid cycle
924 924
925 925 streamclone.local_copy(srcrepo, destrepo)
926 926
927 927 # we need to re-init the repo after manually copying the data
928 928 # into it
929 929 destpeer = peer(srcrepo, peeropts, dest)
930 930
931 931 # make the peer aware that is it already locked
932 932 #
933 933 # important:
934 934 #
935 935 # We still need to release that lock at the end of the function
936 936 destpeer.local()._lockref = weakref.ref(destlock)
937 937 destpeer.local()._wlockref = weakref.ref(destwlock)
938 938 # dirstate also needs to be copied because `_wlockref` has a reference
939 939 # to it: this dirstate is saved to disk when the wlock is released
940 940 destpeer.local().dirstate = destrepo.dirstate
941 941
942 942 srcrepo.hook(
943 943 b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
944 944 )
945 945 else:
946 946 try:
947 947 # only pass ui when no srcrepo
948 948 destpeer = peer(
949 949 srcrepo or ui,
950 950 peeropts,
951 951 dest,
952 952 create=True,
953 953 createopts=createopts,
954 954 )
955 955 except FileExistsError:
956 956 cleandir = None
957 957 raise error.Abort(_(b"destination '%s' already exists") % dest)
958 958
959 959 if revs:
960 960 if not srcpeer.capable(b'lookup'):
961 961 raise error.Abort(
962 962 _(
963 963 b"src repository does not support "
964 964 b"revision lookup and so doesn't "
965 965 b"support clone by revision"
966 966 )
967 967 )
968 968
969 969 # TODO this is batchable.
970 970 remoterevs = []
971 971 for rev in revs:
972 972 with srcpeer.commandexecutor() as e:
973 973 remoterevs.append(
974 974 e.callcommand(
975 975 b'lookup',
976 976 {
977 977 b'key': rev,
978 978 },
979 979 ).result()
980 980 )
981 981 revs = remoterevs
982 982
983 983 checkout = revs[0]
984 984 else:
985 985 revs = None
986 986 local = destpeer.local()
987 987 if local:
988 988 if narrow:
989 989 with local.wlock(), local.lock():
990 990 local.setnarrowpats(storeincludepats, storeexcludepats)
991 991 narrowspec.copytoworkingcopy(local)
992 992
993 993 u = urlutil.url(abspath)
994 994 defaulturl = bytes(u)
995 995 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
996 996 if not stream:
997 997 if pull:
998 998 stream = False
999 999 else:
1000 1000 stream = None
1001 1001 # internal config: ui.quietbookmarkmove
1002 1002 overrides = {(b'ui', b'quietbookmarkmove'): True}
1003 1003 with local.ui.configoverride(overrides, b'clone'):
1004 1004 exchange.pull(
1005 1005 local,
1006 1006 srcpeer,
1007 1007 heads=revs,
1008 1008 streamclonerequested=stream,
1009 1009 includepats=storeincludepats,
1010 1010 excludepats=storeexcludepats,
1011 1011 depth=depth,
1012 1012 )
1013 1013 elif srcrepo:
1014 1014 # TODO lift restriction once exchange.push() accepts narrow
1015 1015 # push.
1016 1016 if narrow:
1017 1017 raise error.Abort(
1018 1018 _(
1019 1019 b'narrow clone not available for '
1020 1020 b'remote destinations'
1021 1021 )
1022 1022 )
1023 1023
1024 1024 exchange.push(
1025 1025 srcrepo,
1026 1026 destpeer,
1027 1027 revs=revs,
1028 1028 bookmarks=srcrepo._bookmarks.keys(),
1029 1029 )
1030 1030 else:
1031 1031 raise error.Abort(
1032 1032 _(b"clone from remote to remote not supported")
1033 1033 )
1034 1034
1035 1035 cleandir = None
1036 1036
1037 1037 destrepo = destpeer.local()
1038 1038 if destrepo:
1039 1039 template = uimod.samplehgrcs[b'cloned']
1040 1040 u = urlutil.url(abspath)
1041 1041 u.passwd = None
1042 1042 defaulturl = bytes(u)
1043 1043 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
1044 1044 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
1045 1045
1046 1046 if ui.configbool(b'experimental', b'remotenames'):
1047 1047 logexchange.pullremotenames(destrepo, srcpeer)
1048 1048
1049 1049 if update:
1050 1050 if update is not True:
1051 1051 with srcpeer.commandexecutor() as e:
1052 1052 checkout = e.callcommand(
1053 1053 b'lookup',
1054 1054 {
1055 1055 b'key': update,
1056 1056 },
1057 1057 ).result()
1058 1058
1059 1059 uprev = None
1060 1060 status = None
1061 1061 if checkout is not None:
1062 1062 # Some extensions (at least hg-git and hg-subversion) have
1063 1063 # a peer.lookup() implementation that returns a name instead
1064 1064 # of a nodeid. We work around it here until we've figured
1065 1065 # out a better solution.
1066 1066 if len(checkout) == 20 and checkout in destrepo:
1067 1067 uprev = checkout
1068 1068 elif scmutil.isrevsymbol(destrepo, checkout):
1069 1069 uprev = scmutil.revsymbol(destrepo, checkout).node()
1070 1070 else:
1071 1071 if update is not True:
1072 1072 try:
1073 1073 uprev = destrepo.lookup(update)
1074 1074 except error.RepoLookupError:
1075 1075 pass
1076 1076 if uprev is None:
1077 1077 try:
1078 1078 if destrepo._activebookmark:
1079 1079 uprev = destrepo.lookup(destrepo._activebookmark)
1080 1080 update = destrepo._activebookmark
1081 1081 else:
1082 1082 uprev = destrepo._bookmarks[b'@']
1083 1083 update = b'@'
1084 1084 bn = destrepo[uprev].branch()
1085 1085 if bn == b'default':
1086 1086 status = _(b"updating to bookmark %s\n" % update)
1087 1087 else:
1088 1088 status = (
1089 1089 _(b"updating to bookmark %s on branch %s\n")
1090 1090 ) % (update, bn)
1091 1091 except KeyError:
1092 1092 try:
1093 1093 uprev = destrepo.branchtip(b'default')
1094 1094 except error.RepoLookupError:
1095 1095 uprev = destrepo.lookup(b'tip')
1096 1096 if not status:
1097 1097 bn = destrepo[uprev].branch()
1098 1098 status = _(b"updating to branch %s\n") % bn
1099 1099 destrepo.ui.status(status)
1100 1100 _update(destrepo, uprev)
1101 1101 if update in destrepo._bookmarks:
1102 1102 bookmarks.activate(destrepo, update)
1103 1103 if destlock is not None:
1104 1104 release(destlock)
1105 1105 if destwlock is not None:
1106 1106 release(destlock)
1107 1107 # here is a tiny windows were someone could end up writing the
1108 1108 # repository before the cache are sure to be warm. This is "fine"
1109 1109 # as the only "bad" outcome would be some slowness. That potential
1110 1110 # slowness already affect reader.
1111 1111 with destrepo.lock():
1112 1112 destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
1113 1113 finally:
1114 1114 release(srclock, destlock, destwlock)
1115 1115 if cleandir is not None:
1116 1116 shutil.rmtree(cleandir, True)
1117 1117 if srcpeer is not None:
1118 1118 srcpeer.close()
1119 1119 if destpeer and destpeer.local() is None:
1120 1120 destpeer.close()
1121 1121 return srcpeer, destpeer
1122 1122
1123 1123
def _showstats(repo, stats, quietempty=False):
    """Print a one-line summary of update/merge statistics to the ui.

    When ``quietempty`` is true and ``stats`` reports that nothing was
    done, no message is printed at all.
    """
    if quietempty and stats.isempty():
        return
    counts = (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )
    msg = _(
        b"%d files updated, %d files merged, "
        b"%d files removed, %d files unresolved\n"
    )
    repo.ui.status(msg % counts)
1139 1139
1140 1140
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Deprecated: update the working directory to ``node``.

    Changes are clobbered when ``overwrite`` is set and merged otherwise.
    Returns stats (see pydoc mercurial.merge.applyupdates).  New code
    should call merge.update() or merge.clean_update() directly.
    """
    # This wrapper only survives for backward compatibility; nag callers.
    repo.ui.deprecwarn(
        b'prefer merge.update() or merge.clean_update() over hg.updaterepo()',
        b'5.7',
    )
    return mergemod._update(
        repo,
        node,
        branchmerge=False,
        force=overwrite,
        updatecheck=updatecheck,
        labels=[b'working copy', b'destination'],
    )
1159 1159
1160 1160
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node``.

    Returns True when unresolved file merges remain, False otherwise.
    """
    ctx = repo[node]
    stats = mergemod.update(ctx, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    unresolved = stats.unresolvedcount
    if unresolved:
        repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
1168 1168
1169 1169
# clone() has a local variable named `update`; keep a module-level alias so
# that code can still reach this function (naming conflict in clone()).
_update = update
1172 1172
1173 1173
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Always returns False: a clean (forced) update can never leave
    unresolved file merges behind.
    """
    update_stats = mergemod.clean_update(repo[node])
    # a forced update must not produce conflicts
    assert update_stats.unresolvedcount == 0
    if show_stats:
        _showstats(repo, update_stats, quietempty)
    return False
1181 1181
1182 1182
# updatetotally() has a `clean` argument; keep a module-level alias so that
# code can still reach this function (naming conflict in updatetotally()).
_clean = clean

# Values accepted for the `commands.update.check` config option and for the
# `updatecheck` argument of updatetotally().
_VALID_UPDATECHECKS = {
    mergemod.UPDATECHECK_ABORT,
    mergemod.UPDATECHECK_NONE,
    mergemod.UPDATECHECK_LINEAR,
    mergemod.UPDATECHECK_NO_CONFLICT,
}
1192 1192
1193 1193
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are the UPDATECHECK_* constants
    defined in the merge module. Passing `None` will result in using the
    configured default.

    * ABORT: abort if the working directory is dirty
    * NONE: don't check (merge working directory changes into destination)
    * LINEAR: check that update is linear before merging working directory
      changes into destination
    * NO_CONFLICT: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config(b'commands', b'update.check')
        if updatecheck not in _VALID_UPDATECHECKS:
            # If not configured, or invalid value configured
            updatecheck = mergemod.UPDATECHECK_LINEAR
    # An explicitly passed updatecheck must be valid; only the configured
    # default silently falls back to LINEAR above.
    if updatecheck not in _VALID_UPDATECHECKS:
        raise ValueError(
            r'Invalid updatecheck value %r (can accept %r)'
            % (updatecheck, _VALID_UPDATECHECKS)
        )
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil pick one, possibly
            # carrying the active bookmark along (movemarkfrom)
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == mergemod.UPDATECHECK_ABORT:
                # bail out *before* updating; the update itself then runs
                # with no check at all
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = mergemod.UPDATECHECK_NONE
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            # the update succeeded: try to advance the active bookmark
            if movemarkfrom == repo[b'.'].node():
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
                b = ui.label(repo._activebookmark, b'bookmarks.active')
                ui.status(_(b"updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            # the requested name is a bookmark: make it the active one
            if brev != repo._activebookmark:
                b = ui.label(brev, b'bookmarks.active')
                ui.status(_(b"(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            # updating to a non-bookmark name deactivates any active bookmark
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, b'bookmarks')
                ui.status(_(b"(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret
1272 1272
1273 1273
def merge(
    ctx,
    force=False,
    remind=True,
    labels=None,
):
    """Branch merge the working directory with ``ctx``, resolving changes.

    Returns True when any unresolved conflicts remain, False otherwise.
    When the merge is conflict-free and ``remind`` is set, a reminder to
    commit is printed.
    """
    repo = ctx.repo()
    stats = mergemod.merge(ctx, force=force, labels=labels)
    _showstats(repo, stats)
    has_conflicts = stats.unresolvedcount > 0
    if has_conflicts:
        repo.ui.status(
            _(
                b"use 'hg resolve' to retry unresolved file merges "
                b"or 'hg merge --abort' to abandon\n"
            )
        )
    elif remind:
        repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
    return has_conflicts
1295 1295
1296 1296
def abortmerge(ui, repo):
    """Abort an in-progress merge, updating back to its first parent."""
    mergestate = mergestatemod.mergestate.read(repo)
    # When conflicts were recorded, the merge state remembers the local
    # side of the merge; without conflicts no merge state was stored, so
    # fall back to the working directory parent.
    if mergestate.active():
        node = mergestate.localctx.hex()
    else:
        node = repo[b'.'].hex()

    repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
    stats = mergemod.clean_update(repo[node])
    # a clean update cannot leave conflicts behind
    assert stats.unresolvedcount == 0
    _showstats(repo, stats)
1310 1310
1311 1311
def _incoming(
    displaychlist,
    subreporecurse,
    ui,
    repo,
    source,
    opts,
    buffered=False,
    subpath=None,
):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.

    Returns 0 when incoming changes were found and displayed, otherwise
    the value of subreporecurse().
    """
    srcs = urlutil.get_pull_paths(repo, ui, [source])
    srcs = list(srcs)
    if len(srcs) != 1:
        msg = _(b'for now, incoming supports only a single source, %d provided')
        msg %= len(srcs)
        raise error.Abort(msg)
    path = srcs[0]
    if subpath is None:
        peer_path = path
        url = path.loc
    else:
        # XXX path: we are losing the `path` object here. Keeping it would be
        # valuable. For example as a "variant" as we do for pushes.
        subpath = urlutil.url(subpath)
        if subpath.isabs():
            # an absolute subpath replaces the configured source entirely
            peer_path = url = bytes(subpath)
        else:
            # relative subpath: graft it onto the source URL, normalizing
            # with the path rules of the target filesystem/protocol
            p = urlutil.url(path.loc)
            if p.islocal():
                normpath = os.path.normpath
            else:
                normpath = posixpath.normpath
            p.path = normpath(b'%s/%s' % (p.path, subpath))
            peer_path = url = bytes(p)
    other = peer(repo, opts, peer_path)
    # cleanupfn may be replaced below by getremotechanges(), which takes
    # over responsibility for closing the peer (and any bundle repo)
    cleanupfn = other.close
    try:
        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
        branches = (path.branch, opts.get(b'branch', []))
        revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))

        if revs:
            revs = [other.lookup(rev) for rev in revs]
        # `other` may be rebound to a (local) bundle repository here
        other, chlist, cleanupfn = bundlerepo.getremotechanges(
            ui, repo, other, revs, opts.get(b"bundle"), opts.get(b"force")
        )

        if not chlist:
            ui.status(_(b"no changes found\n"))
            return subreporecurse()
        ui.pager(b'incoming')
        displayer = logcmdutil.changesetdisplayer(
            ui, other, opts, buffered=buffered
        )
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0  # exit code is zero since we found incoming changes
1378 1378
1379 1379
def incoming(ui, repo, source, opts, subpath=None):
    """Show changesets that would be pulled from ``source``.

    Returns 0 when incoming changes were found (here or, with --subrepos,
    in any subrepository), 1 otherwise.
    """

    def subreporecurse():
        # Recurse into subrepositories when --subrepos was requested; the
        # best (lowest) exit status across all of them wins.
        ret = 1
        if opts.get(b'subrepos'):
            wctx = repo[None]
            for subrepo_path in sorted(wctx.substate):
                sub = wctx.sub(subrepo_path)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        skip_merges = opts.get(b'no_merges')
        if opts.get(b'newest_first'):
            chlist.reverse()
        shown = 0
        for node in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [
                p for p in other.changelog.parents(node) if p != repo.nullid
            ]
            # two non-null parents means this changeset is a merge
            if skip_merges and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[node])

    return _incoming(
        display, subreporecurse, ui, repo, source, opts, subpath=subpath
    )
1409 1409
1410 1410
def _outgoing(ui, repo, dests, opts, subpath=None):
    """Compute the changesets missing from each destination in ``dests``.

    Returns a pair ``(outgoing_revs, others)``: the union of missing nodes
    across all destinations sorted by local revision number, and the list
    of still-open peers (the caller is responsible for closing them).
    """
    out = set()
    others = []
    for path in urlutil.get_push_paths(repo, ui, dests):
        dest = path.loc
        if subpath is not None:
            # NOTE(review): `subpath` is rebound to a url object on the
            # first iteration, so later iterations no longer see the
            # original bytes — presumably only exercised with a single
            # destination; verify for multiple dests.
            subpath = urlutil.url(subpath)
            if subpath.isabs():
                # an absolute subpath replaces the destination entirely
                dest = bytes(subpath)
            else:
                # relative subpath: graft it onto the destination URL,
                # normalizing with the target's path rules
                p = urlutil.url(dest)
                if p.islocal():
                    normpath = os.path.normpath
                else:
                    normpath = posixpath.normpath
                p.path = normpath(b'%s/%s' % (p.path, subpath))
                dest = bytes(p)
        branches = path.branch, opts.get(b'branch') or []

        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
        revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
        if revs:
            revs = [repo[rev].node() for rev in logcmdutil.revrange(repo, revs)]

        other = peer(repo, opts, dest)
        try:
            outgoing = discovery.findcommonoutgoing(
                repo, other, revs, force=opts.get(b'force')
            )
            o = outgoing.missing
            out.update(o)
            if not o:
                scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
            # the peer stays open; ownership passes to the caller
            others.append(other)
        except: # re-raises
            # close the peer that never made it into `others`
            other.close()
            raise
    # make sure this is ordered by revision number
    outgoing_revs = list(out)
    cl = repo.changelog
    outgoing_revs.sort(key=cl.rev)
    return outgoing_revs, others
1453 1453
1454 1454
def _outgoing_recurse(ui, repo, dests, opts):
    """Run outgoing on every subrepository when --subrepos is set.

    Returns 0 when any subrepository has outgoing changes, 1 otherwise
    (including when --subrepos was not requested).
    """
    ret = 1
    if not opts.get(b'subrepos'):
        return ret
    wctx = repo[None]
    for subrepo_path in sorted(wctx.substate):
        sub = wctx.sub(subrepo_path)
        ret = min(ret, sub.outgoing(ui, dests, opts))
    return ret
1463 1463
1464 1464
def _outgoing_filter(repo, revs, opts):
    """Apply the --limit/--no-merges/--newest-first options to ``revs``.

    Yields nodes from ``revs`` (reversed in place when --newest-first is
    set), skipping merges when --no-merges is set and stopping after
    --limit nodes have been produced.
    """
    limit = logcmdutil.getlimit(opts)
    no_merges = opts.get(b'no_merges')
    if opts.get(b'newest_first'):
        revs.reverse()
    if limit is None and not no_merges:
        # fast path: nothing to filter, stream everything through
        yield from revs
        return

    cl = repo.changelog
    emitted = 0
    for node in revs:
        if limit is not None and emitted >= limit:
            break
        # two non-null parents means this changeset is a merge
        parents = [p for p in cl.parents(node) if p != repo.nullid]
        if no_merges and len(parents) == 2:
            continue
        emitted += 1
        yield node
1486 1486
1487 1487
def outgoing(ui, repo, dests, opts, subpath=None):
    """Show changesets not present in the destination repositories.

    Returns 0 when outgoing changes exist (here or, with --subrepos, in
    any subrepository), 1 otherwise.
    """
    use_graph = opts.get(b'graph')
    if use_graph:
        logcmdutil.checkunsupportedgraphflags([], opts)
    o, others = _outgoing(ui, repo, dests, opts, subpath=subpath)
    ret = 1
    try:
        if o:
            ret = 0

            if use_graph:
                revdag = logcmdutil.graphrevs(repo, o, opts)
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(
                    ui, repo, opts, buffered=True
                )
                logcmdutil.displaygraph(
                    ui, repo, revdag, displayer, graphmod.asciiedges
                )
            else:
                ui.pager(b'outgoing')
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
                for node in _outgoing_filter(repo, o, opts):
                    displayer.show(repo[node])
                displayer.close()
        # run the outgoing hooks for every remote, even when nothing is
        # outgoing, then recurse into subrepositories
        for remote in others:
            cmdutil.outgoinghooks(ui, repo, remote, opts, o)
        ret = min(ret, _outgoing_recurse(ui, repo, dests, opts))
        return ret  # exit code is zero since we found outgoing changes
    finally:
        for remote in others:
            remote.close()
1519 1519
1520 1520
def verify(repo, level=None):
    """verify the consistency of a repository

    Also verifies all subrepositories referenced from visible changesets.
    Returns the (worst) verification status; a truthy value indicates
    problems were found.
    """
    ret = verifymod.verify(repo, level=level)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs(
        b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
    )

    if revs:
        repo.ui.status(_(b'checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # accumulate the worst result across all subrepos
                        ret = (
                            ctx.sub(subpath, allowcreate=False).verify() or ret
                        )
                    except error.RepoError as e:
                        repo.ui.warn(b'%d: %s\n' % (rev, e))
            except Exception:
                # deliberately broad: a corrupt .hgsubstate can fail in
                # arbitrary ways while being parsed; warn and keep going
                repo.ui.warn(
                    _(b'.hgsubstate is corrupt in revision %s\n')
                    % short(ctx.node())
                )

    return ret
1553 1553
1554 1554
def remoteui(src, opts):
    """Build a ui suitable for talking to a remote, from ``src`` and ``opts``.

    ``src`` may be a repository (its repo-specific configuration is
    dropped) or a plain ui object (all global options are kept).  Settings
    relevant to remote access — ssh command, bundle root, auth, host
    fingerprints/security, HTTP proxy, CA certificates — are copied over.
    """
    if util.safehasattr(src, b'baseui'):
        # looks like a repository: start from its base ui so repo-local
        # config does not leak, but read option values from repo.ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume it's a global ui object; keep all global options
        dst = src.copy()

    # ssh-specific options, from the command line or the source config
    for opt in (b'ssh', b'remotecmd'):
        value = opts.get(opt) or src.config(b'ui', opt)
        if value:
            dst.setconfig(b"ui", opt, value, b'copied')

    # bundle-specific options
    reporoot = src.config(b'bundle', b'mainreporoot')
    if reporoot:
        dst.setconfig(b'bundle', b'mainreporoot', reporoot, b'copied')

    # copy selected local settings to the remote ui
    sections = (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy')
    for section in sections:
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, b'copied')
    cacerts = src.config(b'web', b'cacerts')
    if cacerts:
        dst.setconfig(b'web', b'cacerts', util.expandpath(cacerts), b'copied')

    return dst
1583 1583
1584 1584
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files. Entries are (repo attribute holding the base path, filename);
# see cachedlocalrepo._repostate() below for the consumer.
foi = [
    (b'spath', b'00changelog.i'),
    (b'spath', b'phaseroots'), # ! phase can change content at the same size
    (b'spath', b'obsstore'),
    (b'path', b'bookmarks'), # ! bookmark can change content at the same size
]
1594 1594
1595 1595
class cachedlocalrepo:
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime
        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) per file of interest, newest mtime seen)."""
        snapshot = []
        newest = -1
        for attrname, fname in foi:
            base = getattr(self._repo, attrname)
            target = os.path.join(base, fname)
            try:
                st = os.stat(target)
            except OSError:
                # the file may not exist yet; fall back to its directory
                st = os.stat(base)
            mtime = st[stat.ST_MTIME]
            snapshot.append((mtime, st.st_size))
            if mtime > newest:
                newest = mtime

        return tuple(snapshot), newest

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        fresh = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            fresh = fresh.filtered(self._filtername)
        else:
            fresh = fresh.unfiltered()
        clone = cachedlocalrepo(fresh)
        clone._state = self._state
        clone.mtime = self.mtime
        return clone
@@ -1,642 +1,643 b''
1 1 # httppeer.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import errno
11 11 import io
12 12 import os
13 13 import socket
14 14 import struct
15 15
16 16 from concurrent import futures
17 17 from .i18n import _
18 18 from .pycompat import getattr
19 19 from . import (
20 20 bundle2,
21 21 error,
22 22 httpconnection,
23 23 pycompat,
24 24 statichttprepo,
25 25 url as urlmod,
26 26 util,
27 27 wireprotov1peer,
28 28 )
29 29 from .utils import urlutil
30 30
31 31 httplib = util.httplib
32 32 urlerr = util.urlerr
33 33 urlreq = util.urlreq
34 34
35 35
def encodevalueinheaders(value, header, limit):
    """Split a bytes value across multiple HTTP headers.

    ``value`` is encoded into 1 or more HTTP headers named
    ``header-<N>`` (``<N>`` counting from 1) such that each header
    name + value pair is at most ``limit`` bytes long.

    Returns a list of 2-tuples of header names and values as native
    strings.
    """
    # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
    # not bytes. This function always takes bytes in as arguments.
    fmt = pycompat.strurl(header) + r'-%s'
    # Note: it is *NOT* a bug that the last bit here is a bytestring
    # and not a unicode: we're just getting the encoded length anyway,
    # and using an r-string to make it portable between Python 2 and 3
    # doesn't work because then the \r is a literal backslash-r
    # instead of a carriage return.
    chunksize = limit - len(fmt % '000') - len(b': \r\n')

    chunks = (
        value[offset : offset + chunksize]
        for offset in range(0, len(value), chunksize)
    )
    return [
        (fmt % str(idx), pycompat.strurl(chunk))
        for idx, chunk in enumerate(chunks, 1)
    ]
63 63
64 64
65 65 class _multifile:
66 66 def __init__(self, *fileobjs):
67 67 for f in fileobjs:
68 68 if not util.safehasattr(f, b'length'):
69 69 raise ValueError(
70 70 b'_multifile only supports file objects that '
71 71 b'have a length but this one does not:',
72 72 type(f),
73 73 f,
74 74 )
75 75 self._fileobjs = fileobjs
76 76 self._index = 0
77 77
78 78 @property
79 79 def length(self):
80 80 return sum(f.length for f in self._fileobjs)
81 81
82 82 def read(self, amt=None):
83 83 if amt <= 0:
84 84 return b''.join(f.read() for f in self._fileobjs)
85 85 parts = []
86 86 while amt and self._index < len(self._fileobjs):
87 87 parts.append(self._fileobjs[self._index].read(amt))
88 88 got = len(parts[-1])
89 89 if got < amt:
90 90 self._index += 1
91 91 amt -= got
92 92 return b''.join(parts)
93 93
94 94 def seek(self, offset, whence=os.SEEK_SET):
95 95 if whence != os.SEEK_SET:
96 96 raise NotImplementedError(
97 97 b'_multifile does not support anything other'
98 98 b' than os.SEEK_SET for whence on seek()'
99 99 )
100 100 if offset != 0:
101 101 raise NotImplementedError(
102 102 b'_multifile only supports seeking to start, but that '
103 103 b'could be fixed if you need it'
104 104 )
105 105 for f in self._fileobjs:
106 106 f.seek(0)
107 107 self._index = 0
108 108
109 109
def makev1commandrequest(
    ui, requestbuilder, caps, capablefn, repobaseurl, cmd, args
):
    """Make an HTTP request to run a command for a version 1 client.

    ``caps`` is a set of known server capabilities. The value may be
    None if capabilities are not yet known.

    ``capablefn`` is a function to evaluate a capability.

    ``cmd``, ``args``, and ``data`` define the command, its arguments, and
    raw data to pass to it.

    Returns a ``(request, url, querystring)`` tuple.
    """
    if cmd == b'pushkey':
        # pushkey carries no payload but the wire protocol expects one.
        args[b'data'] = b''
    data = args.pop(b'data', None)
    headers = args.pop(b'headers', {})

    ui.debug(b"sending %s command\n" % cmd)
    q = [(b'cmd', cmd)]
    headersize = 0
    # Important: don't use self.capable() here or else you end up
    # with infinite recursion when trying to look up capabilities
    # for the first time.
    postargsok = caps is not None and b'httppostargs' in caps

    # Send arguments via POST.
    if postargsok and args:
        strargs = urlreq.urlencode(sorted(args.items()))
        if not data:
            data = strargs
        else:
            # Prepend the encoded arguments to the existing body,
            # presenting both as a single file-like object.
            if isinstance(data, bytes):
                i = io.BytesIO(data)
                i.length = len(data)
                data = i
            argsio = io.BytesIO(strargs)
            argsio.length = len(strargs)
            data = _multifile(argsio, data)
        headers['X-HgArgs-Post'] = len(strargs)
    elif args:
        # Calling self.capable() can infinite loop if we are calling
        # "capabilities". But that command should never accept wire
        # protocol arguments. So this should never happen.
        assert cmd != b'capabilities'
        httpheader = capablefn(b'httpheader')
        if httpheader:
            headersize = int(httpheader.split(b',', 1)[0])

        # Send arguments via HTTP headers.
        if headersize > 0:
            # The headers can typically carry more data than the URL.
            encoded_args = urlreq.urlencode(sorted(args.items()))
            for header, value in encodevalueinheaders(
                encoded_args, b'X-HgArg', headersize
            ):
                headers[header] = value
        # Send arguments via query string (Mercurial <1.9).
        else:
            q += sorted(args.items())

    qs = b'?%s' % urlreq.urlencode(q)
    cu = b"%s%s" % (repobaseurl, qs)
    size = 0
    if util.safehasattr(data, b'length'):
        size = data.length
    elif data is not None:
        size = len(data)
    if data is not None and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/mercurial-0.1'

    # Tell the server we accept application/mercurial-0.2 and multiple
    # compression formats if the server is capable of emitting those
    # payloads.
    # Note: Keep this set empty by default, as client advertisement of
    # protocol parameters should only occur after the handshake.
    protoparams = set()

    mediatypes = set()
    if caps is not None:
        mt = capablefn(b'httpmediatype')
        if mt:
            protoparams.add(b'0.1')
            mediatypes = set(mt.split(b','))

        protoparams.add(b'partial-pull')

    if b'0.2tx' in mediatypes:
        protoparams.add(b'0.2')

    if b'0.2tx' in mediatypes and capablefn(b'compression'):
        # We /could/ compare supported compression formats and prune
        # non-mutually supported or error if nothing is mutually supported.
        # For now, send the full list to the server and have it error.
        comps = [
            e.wireprotosupport().name
            for e in util.compengines.supportedwireengines(util.CLIENTROLE)
        ]
        protoparams.add(b'comp=%s' % b','.join(comps))

    if protoparams:
        protoheaders = encodevalueinheaders(
            b' '.join(sorted(protoparams)), b'X-HgProto', headersize or 1024
        )
        for header, value in protoheaders:
            headers[header] = value

    # Advertise every X-Hg* header in Vary so caching proxies key on them.
    varyheaders = []
    for header in headers:
        if header.lower().startswith('x-hg'):
            varyheaders.append(header)

    if varyheaders:
        headers['Vary'] = ','.join(sorted(varyheaders))

    req = requestbuilder(pycompat.strurl(cu), data, headers)

    if data is not None:
        ui.debug(b"sending %d bytes\n" % size)
        req.add_unredirected_header('Content-Length', '%d' % size)

    return req, cu, qs
232 232
233 233
def sendrequest(ui, opener, req):
    """Send a prepared HTTP request.

    Returns the response object.

    Raises ``error.Abort`` on HTTP 401 and ``IOError`` on other
    HTTP-level failures.
    """
    dbg = ui.debug
    # NOTE: ``line`` and ``start`` are only bound when peer-request
    # debugging is on; the ``finally`` below re-checks the same config
    # before using them.
    if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
        line = b'devel-peer-request: %s\n'
        dbg(
            line
            % b'%s %s'
            % (
                pycompat.bytesurl(req.get_method()),
                pycompat.bytesurl(req.get_full_url()),
            )
        )
        hgargssize = None

        for header, value in sorted(req.header_items()):
            header = pycompat.bytesurl(header)
            value = pycompat.bytesurl(value)
            if header.startswith(b'X-hgarg-'):
                # Aggregate argument headers into one size figure
                # instead of dumping each (potentially huge) value.
                if hgargssize is None:
                    hgargssize = 0
                hgargssize += len(value)
            else:
                dbg(line % b' %s %s' % (header, value))

        if hgargssize is not None:
            dbg(
                line
                % b' %d bytes of commands arguments in headers'
                % hgargssize
            )
        data = req.data
        if data is not None:
            length = getattr(data, 'length', None)
            if length is None:
                length = len(data)
            dbg(line % b' %d bytes of data' % length)

        start = util.timer()

    res = None
    try:
        res = opener.open(req)
    except urlerr.httperror as inst:
        if inst.code == 401:
            raise error.Abort(_(b'authorization failed'))
        raise
    except httplib.HTTPException as inst:
        ui.debug(
            b'http error requesting %s\n'
            % urlutil.hidepassword(req.get_full_url())
        )
        ui.traceback()
        raise IOError(None, inst)
    finally:
        if ui.debugflag and ui.configbool(b'devel', b'debug.peer-request'):
            # res stays None when opener.open() raised.
            code = res.code if res else -1
            dbg(
                line
                % b' finished in %.4f seconds (%d)'
                % (util.timer() - start, code)
            )

    # Insert error handlers for common I/O failures.
    urlmod.wrapresponse(res)

    return res
304 304
305 305
class RedirectedRepoError(error.RepoError):
    """A RepoError raised after the server redirected the request.

    ``respurl`` records the URL the response actually came from.
    """

    def __init__(self, msg, respurl):
        super().__init__(msg)
        self.respurl = respurl
310 310
311 311
def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
    """Validate and unwrap a version 1 wire protocol HTTP response.

    Returns a ``(respurl, content-type, resp)`` tuple, where ``resp``
    may have been wrapped in a decompressing reader.

    Raises ``RedirectedRepoError`` when a redirect dropped the query
    string, ``error.OutOfBandError`` for server-reported errors, and
    ``error.RepoError`` for non-Mercurial or unsupported responses.
    """
    # record the url we got redirected to
    redirected = False
    respurl = pycompat.bytesurl(resp.geturl())
    if respurl.endswith(qs):
        respurl = respurl[: -len(qs)]
        qsdropped = False
    else:
        qsdropped = True

    if baseurl.rstrip(b'/') != respurl.rstrip(b'/'):
        redirected = True
        if not ui.quiet:
            ui.warn(_(b'real URL is %s\n') % respurl)

    try:
        proto = pycompat.bytesurl(resp.getheader('content-type', ''))
    except AttributeError:
        proto = pycompat.bytesurl(resp.headers.get('content-type', ''))

    safeurl = urlutil.hidepassword(baseurl)
    if proto.startswith(b'application/hg-error'):
        raise error.OutOfBandError(resp.read())

    # Pre 1.0 versions of Mercurial used text/plain and
    # application/hg-changegroup. We don't support such old servers.
    if not proto.startswith(b'application/mercurial-'):
        ui.debug(b"requested URL: '%s'\n" % urlutil.hidepassword(requrl))
        msg = _(
            b"'%s' does not appear to be an hg repository:\n"
            b"---%%<--- (%s)\n%s\n---%%<---\n"
        ) % (safeurl, proto or b'no content-type', resp.read(1024))

        # Some servers may strip the query string from the redirect. We
        # raise a special error type so callers can react to this specially.
        if redirected and qsdropped:
            raise RedirectedRepoError(msg, respurl)
        else:
            raise error.RepoError(msg)

    try:
        subtype = proto.split(b'-', 1)[1]

        # e.g. "0.1" -> (0, 1)
        version_info = tuple([int(n) for n in subtype.split(b'.')])
    except ValueError:
        raise error.RepoError(
            _(b"'%s' sent a broken Content-Type header (%s)") % (safeurl, proto)
        )

    # TODO consider switching to a decompression reader that uses
    # generators.
    if version_info == (0, 1):
        if compressible:
            resp = util.compengines[b'zlib'].decompressorreader(resp)

    elif version_info == (0, 2):
        # application/mercurial-0.2 always identifies the compression
        # engine in the payload header.
        elen = struct.unpack(b'B', util.readexactly(resp, 1))[0]
        ename = util.readexactly(resp, elen)
        engine = util.compengines.forwiretype(ename)

        resp = engine.decompressorreader(resp)
    else:
        raise error.RepoError(
            _(b"'%s' uses newer protocol %s") % (safeurl, subtype)
        )

    return respurl, proto, resp
381 381
382 382
class httppeer(wireprotov1peer.wirepeer):
    """Version 1 wire protocol peer speaking over HTTP(S)."""

    def __init__(self, ui, path, url, opener, requestbuilder, caps):
        # path: the URL the user asked for; url: the (possibly
        # redirected) URL responses actually came from.
        super().__init__(ui)
        self._path = path
        self._url = url
        self._caps = caps
        # Without httppostargs, argument size is bounded by header/URL
        # limits.
        self.limitedarguments = caps is not None and b'httppostargs' not in caps
        self._urlopener = opener
        self._requestbuilder = requestbuilder

    def __del__(self):
        # Release any pooled (keepalive) connections held by handlers.
        for h in self._urlopener.handlers:
            h.close()
            getattr(h, "close_all", lambda: None)()

    # Begin of ipeerconnection interface.

    def url(self):
        return self._path

    def local(self):
        # An HTTP peer is never a local repository.
        return None

    def canpush(self):
        return True

    def close(self):
        # The opener's traffic counters are optional; silently skip the
        # summary note when they are absent.
        try:
            reqs, sent, recv = (
                self._urlopener.requestscount,
                self._urlopener.sentbytescount,
                self._urlopener.receivedbytescount,
            )
        except AttributeError:
            return
        self.ui.note(
            _(
                b'(sent %d HTTP requests and %d bytes; '
                b'received %d bytes in responses)\n'
            )
            % (reqs, sent, recv)
        )

    # End of ipeerconnection interface.

    # Begin of ipeercommands interface.

    def capabilities(self):
        return self._caps

    # End of ipeercommands interface.

    def _callstream(self, cmd, _compressible=False, **args):
        """Run ``cmd`` on the server and return the raw response object."""
        args = pycompat.byteskwargs(args)

        req, cu, qs = makev1commandrequest(
            self.ui,
            self._requestbuilder,
            self._caps,
            self.capable,
            self._url,
            cmd,
            args,
        )

        resp = sendrequest(self.ui, self._urlopener, req)

        # Track the URL the server actually answered from so later
        # commands follow any redirect.
        self._url, ct, resp = parsev1commandresponse(
            self.ui, self._url, cu, qs, resp, _compressible
        )

        return resp

    def _call(self, cmd, **args):
        """Run ``cmd`` and return the complete response body as bytes."""
        fp = self._callstream(cmd, **args)
        try:
            return fp.read()
        finally:
            # if using keepalive, allow connection to be reused
            fp.close()

    def _callpush(self, cmd, cg, **args):
        """Push changegroup ``cg`` via ``cmd`` and return the reply pair."""
        # have to stream bundle to a temp file because we do not have
        # http 1.1 chunked transfer.

        types = self.capable(b'unbundle')
        try:
            types = types.split(b',')
        except AttributeError:
            # servers older than d1b16a746db6 will send 'unbundle' as a
            # boolean capability. They only support headerless/uncompressed
            # bundles.
            types = [b""]
        for x in types:
            if x in bundle2.bundletypes:
                type = x
                break

        tempname = bundle2.writebundle(self.ui, cg, None, type)
        fp = httpconnection.httpsendfile(self.ui, tempname, b"rb")
        headers = {'Content-Type': 'application/mercurial-0.1'}

        try:
            r = self._call(cmd, data=fp, headers=headers, **args)
            vals = r.split(b'\n', 1)
            if len(vals) < 2:
                raise error.ResponseError(_(b"unexpected response:"), r)
            return vals
        except urlerr.httperror:
            # Catch and re-raise these so we don't try and treat them
            # like generic socket errors. They lack any values in
            # .args on Python 3 which breaks our socket.error block.
            raise
        except socket.error as err:
            if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
                raise error.Abort(_(b'push failed: %s') % err.args[1])
            raise error.Abort(err.args[1])
        finally:
            fp.close()
            os.unlink(tempname)

    def _calltwowaystream(self, cmd, fp, **args):
        """Send the contents of ``fp`` to ``cmd``; return the response
        stream."""
        filename = None
        try:
            # dump bundle to disk
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            with os.fdopen(fd, "wb") as fh:
                d = fp.read(4096)
                while d:
                    fh.write(d)
                    d = fp.read(4096)
            # start http push
            with httpconnection.httpsendfile(self.ui, filename, b"rb") as fp_:
                headers = {'Content-Type': 'application/mercurial-0.1'}
                return self._callstream(cmd, data=fp_, headers=headers, **args)
        finally:
            if filename is not None:
                os.unlink(filename)

    def _callcompressable(self, cmd, **args):
        # Same as _callstream but allows zlib-compressed responses.
        return self._callstream(cmd, _compressible=True, **args)

    def _abort(self, exception):
        raise exception
527 527
528 528
class queuedcommandfuture(futures.Future):
    """A future that forces queued commands to be submitted on demand.

    Calling ``result()`` on a not-yet-submitted command first flushes
    the owning executor's command queue, then resolves normally.
    """

    def result(self, timeout=None):
        if not self.done():
            # sendcommands() restores the original __class__, so the
            # recursive call below dispatches to Future.result().
            self._peerexecutor.sendcommands()
            return self.result(timeout)

        return futures.Future.result(self, timeout)
541 541
542 542
def performhandshake(ui, url, opener, requestbuilder):
    """Issue the capabilities request that opens a v1 HTTP session.

    Returns a ``(respurl, info)`` tuple where ``respurl`` is the URL
    the server actually responded from and ``info`` maps
    ``b'v1capabilities'`` to the set of advertised capabilities.
    """
    # The handshake is a request to the capabilities command.

    caps = None

    def capable(x):
        # Capabilities are unknown at handshake time; nothing should
        # consult them while building this request.
        raise error.ProgrammingError(b'should not be called')

    args = {}

    req, requrl, qs = makev1commandrequest(
        ui, requestbuilder, caps, capable, url, b'capabilities', args
    )
    resp = sendrequest(ui, opener, req)

    # The server may redirect us to the repo root, stripping the
    # ?cmd=capabilities query string from the URL. The server would likely
    # return HTML in this case and ``parsev1commandresponse()`` would raise.
    # We catch this special case and re-issue the capabilities request against
    # the new URL.
    #
    # We should ideally not do this, as a redirect that drops the query
    # string from the URL is arguably a server bug. (Garbage in, garbage out).
    # However, Mercurial clients for several years appeared to handle this
    # issue without behavior degradation. And according to issue 5860, it may
    # be a longstanding bug in some server implementations. So we allow a
    # redirect that drops the query string to "just work."
    try:
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(
            ui, requestbuilder, caps, capable, e.respurl, b'capabilities', args
        )
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(
            ui, url, requrl, qs, resp, compressible=False
        )

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith(b'application/mercurial-'):
        raise error.ProgrammingError(b'unexpected content-type: %s' % ct)

    # Capabilities are a space-delimited list of tokens.
    info = {b'v1capabilities': set(rawdata.split())}

    return respurl, info
594 594
595 595
def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
    """Construct an appropriate HTTP peer instance.

    ``opener`` is an ``url.opener`` that should be used to establish
    connections, perform HTTP requests.

    ``requestbuilder`` is the type used for constructing HTTP requests.
    It exists as an argument so extensions can override the default.

    Aborts when the URL carries a query string or fragment, which the
    peer does not support.
    """
    u = urlutil.url(path)
    if u.query or u.fragment:
        raise error.Abort(
            _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
        )

    # urllib cannot handle URLs with embedded user or passwd.
    url, authinfo = u.authinfo()
    ui.debug(b'using %s\n' % url)

    opener = opener or urlmod.opener(ui, authinfo)

    # The handshake also discovers the URL the server actually answers
    # from (it may differ after redirects).
    respurl, info = performhandshake(ui, url, opener, requestbuilder)

    return httppeer(
        ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
    )
622 622
623 623
def make_peer(ui, path, create, intents=None, createopts=None):
    """Create an httppeer for ``path``.

    ``path`` is a urlutil path object (its raw location is taken from
    ``path.loc`` below) — per the change passing the path object in.
    ``create`` is rejected: repositories cannot be created over HTTP.
    Falls back to a static-http peer when the target is not a running
    hg server.
    """
    if create:
        raise error.Abort(_(b'cannot create new http repository'))
    # From here on, work with the raw URL bytes of the path object.
    path = path.loc
    try:
        if path.startswith(b'https:') and not urlmod.has_https:
            raise error.Abort(
                _(b'Python support for SSL and HTTPS is not installed')
            )

        inst = makepeer(ui, path)

        return inst
    except error.RepoError as httpexception:
        try:
            r = statichttprepo.make_peer(ui, b"static-" + path, create)
            ui.note(_(b'(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception  # use the original http RepoError instead
@@ -1,675 +1,676 b''
1 1 # sshpeer.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import re
10 10 import uuid
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 error,
16 16 pycompat,
17 17 util,
18 18 wireprototypes,
19 19 wireprotov1peer,
20 20 wireprotov1server,
21 21 )
22 22 from .utils import (
23 23 procutil,
24 24 stringutil,
25 25 urlutil,
26 26 )
27 27
28 28
29 29 def _serverquote(s):
30 30 """quote a string for the remote shell ... which we assume is sh"""
31 31 if not s:
32 32 return s
33 33 if re.match(b'[a-zA-Z0-9@%_+=:,./-]*$', s):
34 34 return s
35 35 return b"'%s'" % s.replace(b"'", b"'\\''")
36 36
37 37
def _forwardoutput(ui, pipe, warn=False):
    """Display all data currently available on pipe as remote output.

    This is non blocking. With ``warn=True`` the output goes through
    ``ui.warn`` instead of ``ui.status``.
    """
    if not pipe or pipe.closed:
        return
    data = procutil.readpipe(pipe)
    if not data:
        return
    emit = ui.warn if warn else ui.status
    for line in data.splitlines():
        emit(_(b"remote: "), line, b'\n')
48 48
49 49
class doublepipe:
    """Operate a side-channel pipe in addition of a main one

    The side-channel pipe contains server output to be forwarded to the user
    input. The double pipe will behave as the "main" pipe, but will ensure the
    content of the "side" pipe is properly processed while we wait for blocking
    call on the "main" pipe.

    If large amounts of data are read from "main", the forward will cease after
    the first bytes start to appear. This simplifies the implementation
    without affecting actual output of sshpeer too much as we rarely issue
    large read for data not yet emitted by the server.

    The main pipe is expected to be a 'bufferedinputpipe' from the util module
    that handle all the os specific bits. This class lives in this module
    because it focus on behavior specific to the ssh protocol."""

    def __init__(self, ui, main, side):
        self._ui = ui
        # Primary pipe carrying protocol payload.
        self._main = main
        # Side channel (server stderr) forwarded as "remote:" output.
        self._side = side

    def _wait(self):
        """wait until some data are available on main or side

        return a pair of boolean (ismainready, issideready)

        (This will only wait for data if the setup is supported by `util.poll`)
        """
        if (
            isinstance(self._main, util.bufferedinputpipe)
            and self._main.hasbuffer
        ):
            # Main has data. Assume side is worth poking at.
            return True, True

        fds = [self._main.fileno(), self._side.fileno()]
        try:
            act = util.poll(fds)
        except NotImplementedError:
            # non supported yet case, assume all have data.
            act = fds
        return (self._main.fileno() in act, self._side.fileno() in act)

    def write(self, data):
        # Delegate to main, draining the side channel while blocked.
        return self._call(b'write', data)

    def read(self, size):
        r = self._call(b'read', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def unbufferedread(self, size):
        r = self._call(b'unbufferedread', size)
        if size != 0 and not r:
            # We've observed a condition that indicates the
            # stdout closed unexpectedly. Check stderr one
            # more time and snag anything that's there before
            # letting anyone know the main part of the pipe
            # closed prematurely.
            _forwardoutput(self._ui, self._side)
        return r

    def readline(self):
        return self._call(b'readline')

    def _call(self, methname, data=None):
        """call <methname> on "main", forward output of "side" while blocking"""
        # data can be '' or 0
        if (data is not None and not data) or self._main.closed:
            _forwardoutput(self._ui, self._side)
            return b''
        while True:
            mainready, sideready = self._wait()
            if sideready:
                # Flush server stderr before (or instead of) touching main.
                _forwardoutput(self._ui, self._side)
            if mainready:
                meth = getattr(self._main, methname)
                if data is None:
                    return meth()
                else:
                    return meth(data)

    def close(self):
        # Only the main pipe is closed here; the side pipe is managed
        # by the connection cleanup code.
        return self._main.close()

    @property
    def closed(self):
        return self._main.closed

    def flush(self):
        return self._main.flush()
148 148
149 149
def _cleanuppipes(ui, pipei, pipeo, pipee, warn):
    """Clean up pipes used by an SSH connection."""
    closedany = False

    if pipeo and not pipeo.closed:
        closedany = True
        pipeo.close()

    if pipei and not pipei.closed:
        closedany = True
        pipei.close()

    if pipee and not pipee.closed:
        closedany = True
        # Try to read from the err descriptor until EOF so the user
        # still sees any final server output.
        try:
            for line in pipee:
                ui.status(_(b'remote: '), line)
        except (IOError, ValueError):
            pass

        pipee.close()

    if closedany and warn is not None:
        # Encourage explicit close of sshpeers. Closing via __del__ is
        # not very predictable when exceptions are thrown, which has led
        # to deadlocks due to a peer get gc'ed in a fork
        # We add our own stack trace, because the stacktrace when called
        # from __del__ is useless.
        ui.develwarn(b'missing close on SSH connection created at:\n%s' % warn)
178 178
179 179
def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
    """Create an SSH connection to a server.

    Returns a tuple of (process, stdin, stdout, stderr) for the
    spawned process.
    """
    servecmd = b'%s -R %s serve --stdio' % (
        _serverquote(remotecmd),
        _serverquote(path),
    )
    cmd = b'%s %s %s' % (sshcmd, args, procutil.shellquote(servecmd))

    ui.debug(b'running %s\n' % cmd)

    # no buffer allow the use of 'select'
    # feel free to remove buffering and select usage when we ultimately
    # move to threading.
    stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)

    return proc, stdin, stdout, stderr
203 203
204 204
def _clientcapabilities():
    """Return list of capabilities of this client.

    Returns a list of capabilities that are supported by this client.
    """
    caps = {b'partial-pull'}
    engines = util.compengines.supportedwireengines(util.CLIENTROLE)
    compnames = [e.wireprotosupport().name for e in engines]
    caps.add(b'comp=%s' % b','.join(compnames))
    return caps
217 217
218 218
219 219 def _performhandshake(ui, stdin, stdout, stderr):
220 220 def badresponse():
221 221 # Flush any output on stderr. In general, the stderr contains errors
222 222 # from the remote (ssh errors, some hg errors), and status indications
223 223 # (like "adding changes"), with no current way to tell them apart.
224 224 # Here we failed so early that it's almost certainly only errors, so
225 225 # use warn=True so -q doesn't hide them.
226 226 _forwardoutput(ui, stderr, warn=True)
227 227
228 228 msg = _(b'no suitable response from remote hg')
229 229 hint = ui.config(b'ui', b'ssherrorhint')
230 230 raise error.RepoError(msg, hint=hint)
231 231
232 232 # The handshake consists of sending wire protocol commands in reverse
233 233 # order of protocol implementation and then sniffing for a response
234 234 # to one of them.
235 235 #
236 236 # Those commands (from oldest to newest) are:
237 237 #
238 238 # ``between``
239 239 # Asks for the set of revisions between a pair of revisions. Command
240 240 # present in all Mercurial server implementations.
241 241 #
242 242 # ``hello``
243 243 # Instructs the server to advertise its capabilities. Introduced in
244 244 # Mercurial 0.9.1.
245 245 #
246 246 # ``upgrade``
247 247 # Requests upgrade from default transport protocol version 1 to
248 248 # a newer version. Introduced in Mercurial 4.6 as an experimental
249 249 # feature.
250 250 #
251 251 # The ``between`` command is issued with a request for the null
252 252 # range. If the remote is a Mercurial server, this request will
253 253 # generate a specific response: ``1\n\n``. This represents the
254 254 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
255 255 # in the output stream and know this is the response to ``between``
256 256 # and we're at the end of our handshake reply.
257 257 #
258 258 # The response to the ``hello`` command will be a line with the
259 259 # length of the value returned by that command followed by that
260 260 # value. If the server doesn't support ``hello`` (which should be
261 261 # rare), that line will be ``0\n``. Otherwise, the value will contain
262 262 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
263 263 # the capabilities of the server.
264 264 #
265 265 # The ``upgrade`` command isn't really a command in the traditional
266 266 # sense of version 1 of the transport because it isn't using the
267 267 # proper mechanism for formatting insteads: instead, it just encodes
268 268 # arguments on the line, delimited by spaces.
269 269 #
270 270 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
271 271 # If the server doesn't support protocol upgrades, it will reply to
272 272 # this line with ``0\n``. Otherwise, it emits an
273 273 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
274 274 # Content immediately following this line describes additional
275 275 # protocol and server state.
276 276 #
277 277 # In addition to the responses to our command requests, the server
278 278 # may emit "banner" output on stdout. SSH servers are allowed to
279 279 # print messages to stdout on login. Issuing commands on connection
280 280 # allows us to flush this banner output from the server by scanning
281 281 # for output to our well-known ``between`` command. Of course, if
282 282 # the banner contains ``1\n\n``, this will throw off our detection.
283 283
284 284 requestlog = ui.configbool(b'devel', b'debug.peer-request')
285 285
286 286 # Generate a random token to help identify responses to version 2
287 287 # upgrade request.
288 288 token = pycompat.sysbytes(str(uuid.uuid4()))
289 289
290 290 try:
291 291 pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40)
292 292 handshake = [
293 293 b'hello\n',
294 294 b'between\n',
295 295 b'pairs %d\n' % len(pairsarg),
296 296 pairsarg,
297 297 ]
298 298
299 299 if requestlog:
300 300 ui.debug(b'devel-peer-request: hello+between\n')
301 301 ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
302 302 ui.debug(b'sending hello command\n')
303 303 ui.debug(b'sending between command\n')
304 304
305 305 stdin.write(b''.join(handshake))
306 306 stdin.flush()
307 307 except IOError:
308 308 badresponse()
309 309
310 310 # Assume version 1 of wire protocol by default.
311 311 protoname = wireprototypes.SSHV1
312 312 reupgraded = re.compile(b'^upgraded %s (.*)$' % stringutil.reescape(token))
313 313
314 314 lines = [b'', b'dummy']
315 315 max_noise = 500
316 316 while lines[-1] and max_noise:
317 317 try:
318 318 l = stdout.readline()
319 319 _forwardoutput(ui, stderr, warn=True)
320 320
321 321 # Look for reply to protocol upgrade request. It has a token
322 322 # in it, so there should be no false positives.
323 323 m = reupgraded.match(l)
324 324 if m:
325 325 protoname = m.group(1)
326 326 ui.debug(b'protocol upgraded to %s\n' % protoname)
327 327 # If an upgrade was handled, the ``hello`` and ``between``
328 328 # requests are ignored. The next output belongs to the
329 329 # protocol, so stop scanning lines.
330 330 break
331 331
332 332 # Otherwise it could be a banner, or a ``0\n`` response if the
333 333 # server doesn't support upgrade.
334 334
335 335 if lines[-1] == b'1\n' and l == b'\n':
336 336 break
337 337 if l:
338 338 ui.debug(b'remote: ', l)
339 339 lines.append(l)
340 340 max_noise -= 1
341 341 except IOError:
342 342 badresponse()
343 343 else:
344 344 badresponse()
345 345
346 346 caps = set()
347 347
348 348 # For version 1, we should see a ``capabilities`` line in response to the
349 349 # ``hello`` command.
350 350 if protoname == wireprototypes.SSHV1:
351 351 for l in reversed(lines):
352 352 # Look for response to ``hello`` command. Scan from the back so
353 353 # we don't misinterpret banner output as the command reply.
354 354 if l.startswith(b'capabilities:'):
355 355 caps.update(l[:-1].split(b':')[1].split())
356 356 break
357 357
358 358 # Error if we couldn't find capabilities, this means:
359 359 #
360 360 # 1. Remote isn't a Mercurial server
361 361 # 2. Remote is a <0.9.1 Mercurial server
362 362 # 3. Remote is a future Mercurial server that dropped ``hello``
363 363 # and other attempted handshake mechanisms.
364 364 if not caps:
365 365 badresponse()
366 366
367 367 # Flush any output on stderr before proceeding.
368 368 _forwardoutput(ui, stderr, warn=True)
369 369
370 370 return protoname, caps
371 371
372 372
373 373 class sshv1peer(wireprotov1peer.wirepeer):
374 374 def __init__(
375 375 self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
376 376 ):
377 377 """Create a peer from an existing SSH connection.
378 378
379 379 ``proc`` is a handle on the underlying SSH process.
380 380 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
381 381 pipes for that process.
382 382 ``caps`` is a set of capabilities supported by the remote.
383 383 ``autoreadstderr`` denotes whether to automatically read from
384 384 stderr and to forward its output.
385 385 """
386 386 super().__init__(ui)
387 387 self._url = url
388 388 # self._subprocess is unused. Keeping a handle on the process
389 389 # holds a reference and prevents it from being garbage collected.
390 390 self._subprocess = proc
391 391
392 392 # And we hook up our "doublepipe" wrapper to allow querying
393 393 # stderr any time we perform I/O.
394 394 if autoreadstderr:
395 395 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
396 396 stdin = doublepipe(ui, stdin, stderr)
397 397
398 398 self._pipeo = stdin
399 399 self._pipei = stdout
400 400 self._pipee = stderr
401 401 self._caps = caps
402 402 self._autoreadstderr = autoreadstderr
403 403 self._initstack = b''.join(util.getstackframes(1))
404 404
405 405 # Commands that have a "framed" response where the first line of the
406 406 # response contains the length of that response.
407 407 _FRAMED_COMMANDS = {
408 408 b'batch',
409 409 }
410 410
411 411 # Begin of ipeerconnection interface.
412 412
413 413 def url(self):
414 414 return self._url
415 415
416 416 def local(self):
417 417 return None
418 418
419 419 def canpush(self):
420 420 return True
421 421
422 422 def close(self):
423 423 self._cleanup()
424 424
425 425 # End of ipeerconnection interface.
426 426
427 427 # Begin of ipeercommands interface.
428 428
429 429 def capabilities(self):
430 430 return self._caps
431 431
432 432 # End of ipeercommands interface.
433 433
434 434 def _readerr(self):
435 435 _forwardoutput(self.ui, self._pipee)
436 436
437 437 def _abort(self, exception):
438 438 self._cleanup()
439 439 raise exception
440 440
441 441 def _cleanup(self, warn=None):
442 442 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee, warn=warn)
443 443
444 444 def __del__(self):
445 445 self._cleanup(warn=self._initstack)
446 446
447 447 def _sendrequest(self, cmd, args, framed=False):
448 448 if self.ui.debugflag and self.ui.configbool(
449 449 b'devel', b'debug.peer-request'
450 450 ):
451 451 dbg = self.ui.debug
452 452 line = b'devel-peer-request: %s\n'
453 453 dbg(line % cmd)
454 454 for key, value in sorted(args.items()):
455 455 if not isinstance(value, dict):
456 456 dbg(line % b' %s: %d bytes' % (key, len(value)))
457 457 else:
458 458 for dk, dv in sorted(value.items()):
459 459 dbg(line % b' %s-%s: %d' % (key, dk, len(dv)))
460 460 self.ui.debug(b"sending %s command\n" % cmd)
461 461 self._pipeo.write(b"%s\n" % cmd)
462 462 _func, names = wireprotov1server.commands[cmd]
463 463 keys = names.split()
464 464 wireargs = {}
465 465 for k in keys:
466 466 if k == b'*':
467 467 wireargs[b'*'] = args
468 468 break
469 469 else:
470 470 wireargs[k] = args[k]
471 471 del args[k]
472 472 for k, v in sorted(wireargs.items()):
473 473 self._pipeo.write(b"%s %d\n" % (k, len(v)))
474 474 if isinstance(v, dict):
475 475 for dk, dv in v.items():
476 476 self._pipeo.write(b"%s %d\n" % (dk, len(dv)))
477 477 self._pipeo.write(dv)
478 478 else:
479 479 self._pipeo.write(v)
480 480 self._pipeo.flush()
481 481
482 482 # We know exactly how many bytes are in the response. So return a proxy
483 483 # around the raw output stream that allows reading exactly this many
484 484 # bytes. Callers then can read() without fear of overrunning the
485 485 # response.
486 486 if framed:
487 487 amount = self._getamount()
488 488 return util.cappedreader(self._pipei, amount)
489 489
490 490 return self._pipei
491 491
492 492 def _callstream(self, cmd, **args):
493 493 args = pycompat.byteskwargs(args)
494 494 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
495 495
496 496 def _callcompressable(self, cmd, **args):
497 497 args = pycompat.byteskwargs(args)
498 498 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
499 499
500 500 def _call(self, cmd, **args):
501 501 args = pycompat.byteskwargs(args)
502 502 return self._sendrequest(cmd, args, framed=True).read()
503 503
504 504 def _callpush(self, cmd, fp, **args):
505 505 # The server responds with an empty frame if the client should
506 506 # continue submitting the payload.
507 507 r = self._call(cmd, **args)
508 508 if r:
509 509 return b'', r
510 510
511 511 # The payload consists of frames with content followed by an empty
512 512 # frame.
513 513 for d in iter(lambda: fp.read(4096), b''):
514 514 self._writeframed(d)
515 515 self._writeframed(b"", flush=True)
516 516
517 517 # In case of success, there is an empty frame and a frame containing
518 518 # the integer result (as a string).
519 519 # In case of error, there is a non-empty frame containing the error.
520 520 r = self._readframed()
521 521 if r:
522 522 return b'', r
523 523 return self._readframed(), b''
524 524
525 525 def _calltwowaystream(self, cmd, fp, **args):
526 526 # The server responds with an empty frame if the client should
527 527 # continue submitting the payload.
528 528 r = self._call(cmd, **args)
529 529 if r:
530 530 # XXX needs to be made better
531 531 raise error.Abort(_(b'unexpected remote reply: %s') % r)
532 532
533 533 # The payload consists of frames with content followed by an empty
534 534 # frame.
535 535 for d in iter(lambda: fp.read(4096), b''):
536 536 self._writeframed(d)
537 537 self._writeframed(b"", flush=True)
538 538
539 539 return self._pipei
540 540
541 541 def _getamount(self):
542 542 l = self._pipei.readline()
543 543 if l == b'\n':
544 544 if self._autoreadstderr:
545 545 self._readerr()
546 546 msg = _(b'check previous remote output')
547 547 self._abort(error.OutOfBandError(hint=msg))
548 548 if self._autoreadstderr:
549 549 self._readerr()
550 550 try:
551 551 return int(l)
552 552 except ValueError:
553 553 self._abort(error.ResponseError(_(b"unexpected response:"), l))
554 554
555 555 def _readframed(self):
556 556 size = self._getamount()
557 557 if not size:
558 558 return b''
559 559
560 560 return self._pipei.read(size)
561 561
562 562 def _writeframed(self, data, flush=False):
563 563 self._pipeo.write(b"%d\n" % len(data))
564 564 if data:
565 565 self._pipeo.write(data)
566 566 if flush:
567 567 self._pipeo.flush()
568 568 if self._autoreadstderr:
569 569 self._readerr()
570 570
571 571
572 572 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
573 573 """Make a peer instance from existing pipes.
574 574
575 575 ``path`` and ``proc`` are stored on the eventual peer instance and may
576 576 not be used for anything meaningful.
577 577
578 578 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
579 579 SSH server's stdio handles.
580 580
581 581 This function is factored out to allow creating peers that don't
582 582 actually spawn a new process. It is useful for starting SSH protocol
583 583 servers and clients via non-standard means, which can be useful for
584 584 testing.
585 585 """
586 586 try:
587 587 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
588 588 except Exception:
589 589 _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
590 590 raise
591 591
592 592 if protoname == wireprototypes.SSHV1:
593 593 return sshv1peer(
594 594 ui,
595 595 path,
596 596 proc,
597 597 stdin,
598 598 stdout,
599 599 stderr,
600 600 caps,
601 601 autoreadstderr=autoreadstderr,
602 602 )
603 603 else:
604 604 _cleanuppipes(ui, stdout, stdin, stderr, warn=None)
605 605 raise error.RepoError(
606 606 _(b'unknown version of SSH protocol: %s') % protoname
607 607 )
608 608
609 609
610 610 def make_peer(ui, path, create, intents=None, createopts=None):
611 611 """Create an SSH peer.
612 612
613 613 The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
614 614 """
615 path = path.loc
615 616 u = urlutil.url(path, parsequery=False, parsefragment=False)
616 617 if u.scheme != b'ssh' or not u.host or u.path is None:
617 618 raise error.RepoError(_(b"couldn't parse location %s") % path)
618 619
619 620 urlutil.checksafessh(path)
620 621
621 622 if u.passwd is not None:
622 623 raise error.RepoError(_(b'password in URL not supported'))
623 624
624 625 sshcmd = ui.config(b'ui', b'ssh')
625 626 remotecmd = ui.config(b'ui', b'remotecmd')
626 627 sshaddenv = dict(ui.configitems(b'sshenv'))
627 628 sshenv = procutil.shellenviron(sshaddenv)
628 629 remotepath = u.path or b'.'
629 630
630 631 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
631 632
632 633 if create:
633 634 # We /could/ do this, but only if the remote init command knows how to
634 635 # handle them. We don't yet make any assumptions about that. And without
635 636 # querying the remote, there's no way of knowing if the remote even
636 637 # supports said requested feature.
637 638 if createopts:
638 639 raise error.RepoError(
639 640 _(
640 641 b'cannot create remote SSH repositories '
641 642 b'with extra options'
642 643 )
643 644 )
644 645
645 646 cmd = b'%s %s %s' % (
646 647 sshcmd,
647 648 args,
648 649 procutil.shellquote(
649 650 b'%s init %s'
650 651 % (_serverquote(remotecmd), _serverquote(remotepath))
651 652 ),
652 653 )
653 654 ui.debug(b'running %s\n' % cmd)
654 655 res = ui.system(cmd, blockedtag=b'sshpeer', environ=sshenv)
655 656 if res != 0:
656 657 raise error.RepoError(_(b'could not create remote repo'))
657 658
658 659 proc, stdin, stdout, stderr = _makeconnection(
659 660 ui, sshcmd, args, remotecmd, remotepath, sshenv
660 661 )
661 662
662 663 peer = makepeer(ui, path, proc, stdin, stdout, stderr)
663 664
664 665 # Finally, if supported by the server, notify it about our own
665 666 # capabilities.
666 667 if b'protocaps' in peer.capabilities():
667 668 try:
668 669 peer._call(
669 670 b"protocaps", caps=b' '.join(sorted(_clientcapabilities()))
670 671 )
671 672 except IOError:
672 673 peer._cleanup()
673 674 raise error.RepoError(_(b'capability exchange failed'))
674 675
675 676 return peer
@@ -1,265 +1,266 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10
11 11 import errno
12 12
13 13 from .i18n import _
14 14 from .node import sha1nodeconstants
15 15 from . import (
16 16 branchmap,
17 17 changelog,
18 18 error,
19 19 localrepo,
20 20 manifest,
21 21 namespaces,
22 22 pathutil,
23 23 pycompat,
24 24 requirements as requirementsmod,
25 25 url,
26 26 util,
27 27 vfs as vfsmod,
28 28 )
29 29 from .utils import (
30 30 urlutil,
31 31 )
32 32
33 33 urlerr = util.urlerr
34 34 urlreq = util.urlreq
35 35
36 36
37 37 class httprangereader:
38 38 def __init__(self, url, opener):
39 39 # we assume opener has HTTPRangeHandler
40 40 self.url = url
41 41 self.pos = 0
42 42 self.opener = opener
43 43 self.name = url
44 44
45 45 def __enter__(self):
46 46 return self
47 47
48 48 def __exit__(self, exc_type, exc_value, traceback):
49 49 self.close()
50 50
51 51 def seek(self, pos):
52 52 self.pos = pos
53 53
54 54 def read(self, bytes=None):
55 55 req = urlreq.request(pycompat.strurl(self.url))
56 56 end = b''
57 57 if bytes:
58 58 end = self.pos + bytes - 1
59 59 if self.pos or end:
60 60 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
61 61
62 62 try:
63 63 f = self.opener.open(req)
64 64 data = f.read()
65 65 code = f.code
66 66 except urlerr.httperror as inst:
67 67 num = inst.code == 404 and errno.ENOENT or None
68 68 # Explicitly convert the exception to str: Py3 would otherwise
69 69 # try to convert it to the local encoding, which fails because
70 70 # the HTTPResponse instance doesn't support encode.
71 71 raise IOError(num, str(inst))
72 72 except urlerr.urlerror as inst:
73 73 raise IOError(None, inst.reason)
74 74
75 75 if code == 200:
76 76 # HTTPRangeHandler does nothing if remote does not support
77 77 # Range headers and returns the full entity. Let's slice it.
78 78 if bytes:
79 79 data = data[self.pos : self.pos + bytes]
80 80 else:
81 81 data = data[self.pos :]
82 82 elif bytes:
83 83 data = data[:bytes]
84 84 self.pos += len(data)
85 85 return data
86 86
87 87 def readlines(self):
88 88 return self.read().splitlines(True)
89 89
90 90 def __iter__(self):
91 91 return iter(self.readlines())
92 92
93 93 def close(self):
94 94 pass
95 95
96 96
97 97 # _RangeError and _HTTPRangeHandler were originally in byterange.py,
98 98 # which was itself extracted from urlgrabber. See the last version of
99 99 # byterange.py from history if you need more information.
100 100 class _RangeError(IOError):
101 101 """Error raised when an unsatisfiable range is requested."""
102 102
103 103
104 104 class _HTTPRangeHandler(urlreq.basehandler):
105 105 """Handler that enables HTTP Range headers.
106 106
107 107 This was extremely simple. The Range header is a HTTP feature to
108 108 begin with so all this class does is tell urllib2 that the
109 109 "206 Partial Content" response from the HTTP server is what we
110 110 expected.
111 111 """
112 112
113 113 def http_error_206(self, req, fp, code, msg, hdrs):
114 114 # 206 Partial Content Response
115 115 r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
116 116 r.code = code
117 117 r.msg = msg
118 118 return r
119 119
120 120 def http_error_416(self, req, fp, code, msg, hdrs):
121 121 # HTTP's Range Not Satisfiable error
122 122 raise _RangeError(b'Requested Range Not Satisfiable')
123 123
124 124
125 125 def build_opener(ui, authinfo):
126 126 # urllib cannot handle URLs with embedded user or passwd
127 127 urlopener = url.opener(ui, authinfo)
128 128 urlopener.add_handler(_HTTPRangeHandler())
129 129
130 130 class statichttpvfs(vfsmod.abstractvfs):
131 131 def __init__(self, base):
132 132 self.base = base
133 133 self.options = {}
134 134
135 135 def __call__(self, path, mode=b'r', *args, **kw):
136 136 if mode not in (b'r', b'rb'):
137 137 raise IOError(b'Permission denied')
138 138 f = b"/".join((self.base, urlreq.quote(path)))
139 139 return httprangereader(f, urlopener)
140 140
141 141 def join(self, path):
142 142 if path:
143 143 return pathutil.join(self.base, path)
144 144 else:
145 145 return self.base
146 146
147 147 return statichttpvfs
148 148
149 149
150 150 class statichttppeer(localrepo.localpeer):
151 151 def local(self):
152 152 return None
153 153
154 154 def canpush(self):
155 155 return False
156 156
157 157
158 158 class statichttprepository(
159 159 localrepo.localrepository, localrepo.revlogfilestorage
160 160 ):
161 161 supported = localrepo.localrepository._basesupported
162 162
163 163 def __init__(self, ui, path):
164 164 self._url = path
165 165 self.ui = ui
166 166
167 167 self.root = path
168 168 u = urlutil.url(path.rstrip(b'/') + b"/.hg")
169 169 self.path, authinfo = u.authinfo()
170 170
171 171 vfsclass = build_opener(ui, authinfo)
172 172 self.vfs = vfsclass(self.path)
173 173 self.cachevfs = vfsclass(self.vfs.join(b'cache'))
174 174 self._phasedefaults = []
175 175
176 176 self.names = namespaces.namespaces()
177 177 self.filtername = None
178 178 self._extrafilterid = None
179 179 self._wanted_sidedata = set()
180 180 self.features = set()
181 181
182 182 try:
183 183 requirements = set(self.vfs.read(b'requires').splitlines())
184 184 except FileNotFoundError:
185 185 requirements = set()
186 186
187 187 # check if it is a non-empty old-style repository
188 188 try:
189 189 fp = self.vfs(b"00changelog.i")
190 190 fp.read(1)
191 191 fp.close()
192 192 except FileNotFoundError:
193 193 # we do not care about empty old-style repositories here
194 194 msg = _(b"'%s' does not appear to be an hg repository") % path
195 195 raise error.RepoError(msg)
196 196 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
197 197 storevfs = vfsclass(self.vfs.join(b'store'))
198 198 requirements |= set(storevfs.read(b'requires').splitlines())
199 199
200 200 supportedrequirements = localrepo.gathersupportedrequirements(ui)
201 201 localrepo.ensurerequirementsrecognized(
202 202 requirements, supportedrequirements
203 203 )
204 204 localrepo.ensurerequirementscompatible(ui, requirements)
205 205 self.nodeconstants = sha1nodeconstants
206 206 self.nullid = self.nodeconstants.nullid
207 207
208 208 # setup store
209 209 self.store = localrepo.makestore(requirements, self.path, vfsclass)
210 210 self.spath = self.store.path
211 211 self.svfs = self.store.opener
212 212 self.sjoin = self.store.join
213 213 self._filecache = {}
214 214 self.requirements = requirements
215 215
216 216 rootmanifest = manifest.manifestrevlog(self.nodeconstants, self.svfs)
217 217 self.manifestlog = manifest.manifestlog(
218 218 self.svfs, self, rootmanifest, self.narrowmatch()
219 219 )
220 220 self.changelog = changelog.changelog(self.svfs)
221 221 self._tags = None
222 222 self.nodetagscache = None
223 223 self._branchcaches = branchmap.BranchMapCache()
224 224 self._revbranchcache = None
225 225 self.encodepats = None
226 226 self.decodepats = None
227 227 self._transref = None
228 228
229 229 def _restrictcapabilities(self, caps):
230 230 caps = super(statichttprepository, self)._restrictcapabilities(caps)
231 231 return caps.difference([b"pushkey"])
232 232
233 233 def url(self):
234 234 return self._url
235 235
236 236 def local(self):
237 237 return False
238 238
239 239 def peer(self, path=None):
240 240 return statichttppeer(self, path=path)
241 241
242 242 def wlock(self, wait=True):
243 243 raise error.LockUnavailable(
244 244 0,
245 245 _(b'lock not available'),
246 246 b'lock',
247 247 _(b'cannot lock static-http repository'),
248 248 )
249 249
250 250 def lock(self, wait=True):
251 251 raise error.LockUnavailable(
252 252 0,
253 253 _(b'lock not available'),
254 254 b'lock',
255 255 _(b'cannot lock static-http repository'),
256 256 )
257 257
258 258 def _writecaches(self):
259 259 pass # statichttprepository are read only
260 260
261 261
262 262 def make_peer(ui, path, create, intents=None, createopts=None):
263 263 if create:
264 264 raise error.Abort(_(b'cannot create new static-http repository'))
265 path = path.loc
265 266 return statichttprepository(ui, path[7:]).peer()
General Comments 0
You need to be logged in to leave comments. Login now