##// END OF EJS Templates
merge with stable
Augie Fackler -
r41828:91701785 merge default
parent child Browse files
Show More
@@ -1,38 +1,38 b''
# Extract version number into 4 parts, some of which may be empty:
#
# version: the numeric part of the most recent tag. Will always look like 1.3.
#
# type: if an rc build, "rc", otherwise empty
#
# distance: the distance from the nearest tag, or empty if built from a tag
#
# node: the node|short hg was built from, or empty if built from a tag
gethgversion() {
    export HGRCPATH=
    export HGPLAIN=

    make cleanbutpackages
    make local PURE=--pure
    HG="$PWD/hg"

    "$HG" version > /dev/null || { echo 'abort: hg version failed!'; exit 1 ; }

    hgversion=$(LANGUAGE=C "$HG" version | sed -ne 's/.*(version \(.*\))$/\1/p')

    if echo $hgversion | grep + > /dev/null 2>&1 ; then
        # "1.3+4-abcdef" style: everything after '+' is distance-node
        tmp=$(echo $hgversion | cut -d+ -f 2)
        hgversion=$(echo $hgversion | cut -d+ -f 1)
        distance=$(echo $tmp | cut -d- -f 1)
        node=$(echo $tmp | cut -d- -f 2)
    else
        distance=''
        node=''
    fi
    # rc builds look like "4.9rc" or "4.9.1rc1": split at the "rc" marker
    if echo $hgversion | grep -E -- '[0-9]\.[0-9](\.[0-9])?rc' > /dev/null 2>&1; then
        version=$(echo $hgversion | cut -d'r' -f1)
        type="rc$(echo $hgversion | cut -d'c' -f2-)"
    else
        version=$hgversion
        type=''
    fi
}
@@ -1,1838 +1,1838 b''
1 1 # subrepo.py - sub-repository classes and factory
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import stat
16 16 import subprocess
17 17 import sys
18 18 import tarfile
19 19 import xml.dom.minidom
20 20
21 21 from .i18n import _
22 22 from . import (
23 23 cmdutil,
24 24 encoding,
25 25 error,
26 26 exchange,
27 27 logcmdutil,
28 28 match as matchmod,
29 29 node,
30 30 pathutil,
31 31 phases,
32 32 pycompat,
33 33 scmutil,
34 34 subrepoutil,
35 35 util,
36 36 vfs as vfsmod,
37 37 )
38 38 from .utils import (
39 39 dateutil,
40 40 procutil,
41 41 stringutil,
42 42 )
43 43
44 44 hg = None
45 45 reporelpath = subrepoutil.reporelpath
46 46 subrelpath = subrepoutil.subrelpath
47 47 _abssource = subrepoutil._abssource
48 48 propertycache = util.propertycache
49 49
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    url = util.url(util.urllocalpath(util.expandpath(path)))
    if url.scheme:
        # URLs (http://..., ssh://...) are returned untouched
        return path
    return util.normpath(os.path.abspath(url.path))
59 59
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    digest = hashlib.sha1(_expandedabspath(remotepath)).digest()
    return node.hex(digest)[:12]
63 63
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""

    def __init__(self, *args, **kw):
        # remember which subrepo failed and the original exc_info triple
        self.subrepo = kw.pop(r'subrepo', None)
        self.cause = kw.pop(r'cause', None)
        super(SubrepoAbort, self).__init__(*args, **kw)
70 70
def annotatesubrepoerror(func):
    """Decorator wrapping error.Abort raised inside a subrepo method in a
    SubrepoAbort annotated with the subrepo path, so it is handled once."""
    def decoratedmethod(self, *args, **kargs):
        try:
            return func(self, *args, **kargs)
        except SubrepoAbort:
            # This exception has already been handled
            raise
        except error.Abort as ex:
            subrepo = subrelpath(self)
            errormsg = (stringutil.forcebytestr(ex) + ' '
                        + _('(in subrepository "%s")') % subrepo)
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
    return decoratedmethod
87 87
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask which subrepo source to use; returns the promptchoice index
    (0 = local, the default, 1 = remote)."""
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    return ui.promptchoice(template % (subrelpath(sub), local, remote), 0)
101 101
def _sanitize(ui, vfs, ignore):
    """Remove potentially hostile 'hgrc' files found under '.hg' directories
    inside ``vfs``, pruning directories named ``ignore`` from the walk.

    ``ignore`` is a lowercase directory name (e.g. '.git'); matching is
    case-insensitive.
    """
    for dirname, dirs, names in vfs.walk():
        # Prune every case-variant of the ignored directory in place so
        # vfs.walk() does not descend into it. The previous enumerate+del
        # loop deleted only the first match and mutated the list while
        # iterating it, skipping the entry after the deletion point.
        dirs[:] = [d for d in dirs if d.lower() != ignore]
        if vfs.basename(dirname).lower() != '.hg':
            continue
        for f in names:
            if f.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % vfs.join(dirname))
                vfs.unlink(vfs.reljoin(dirname, f))
115 115
def _auditsubrepopath(repo, path):
    """Abort if ``path`` looks unsafe as a subrepo location."""
    # sanity check for potentially unsafe paths such as '~' and '$FOO'
    unsafe = (path.startswith('~')
              or '$' in path
              or util.expandpath(path) != path)
    if unsafe:
        raise error.Abort(_('subrepo path contains illegal component: %s')
                          % path)
    # auditor doesn't check if the path itself is a symlink
    pathutil.pathauditor(repo.root)(path)
    if repo.wvfs.islink(path):
        raise error.Abort(_("subrepo '%s' traverses symbolic link") % path)
125 125
# Default per-type 'allowed' policy consumed by _checktype(): hg subrepos
# are enabled by default, git and svn require explicit opt-in via the
# 'subrepos.<type>:allowed' config knob.
SUBREPO_ALLOWED_DEFAULTS = {
    'hg': True,
    'git': False,
    'svn': False,
}
131 131
def _checktype(ui, kind):
    """Abort unless subrepos of type ``kind`` are enabled by configuration."""
    hint = _("see 'hg help config.subrepos' for details")
    # subrepos.allowed is a master kill switch. If disabled, subrepos are
    # disabled period.
    if not ui.configbool('subrepos', 'allowed', True):
        raise error.Abort(_('subrepos not enabled'), hint=hint)

    default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False)
    if not ui.configbool('subrepos', '%s:allowed' % kind, default):
        raise error.Abort(_('%s subrepos not allowed') % kind, hint=hint)

    if kind not in types:
        raise error.Abort(_('unknown subrepo type %s') % kind)
146 146
def subrepo(ctx, path, allowwdir=False, allowcreate=True):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    _auditsubrepopath(repo, path)
    source, rev, kind = ctx.substate[path]
    _checktype(repo.ui, kind)
    if allowwdir:
        rev = ctx.subrev(path)
    return types[kind](ctx, path, (source, rev), allowcreate)
164 164
def nullsubrepo(ctx, path, pctx):
    """return an empty subrepo in pctx for the extant subrepo in ctx"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    _auditsubrepopath(repo, path)
    source, rev, kind = ctx.substate[path]
    _checktype(repo.ui, kind)
    # hg subrepos use the 40-digit null hex id; other types use ''
    subrev = "0" * 40 if kind == 'hg' else ''
    return types[kind](pctx, path, (source, subrev), True)
183 183
184 184 # subrepo classes need to implement the following abstract class:
185 185
class abstractsubrepo(object):
    """Abstract base class defining the interface all subrepo types implement.

    Methods raising NotImplementedError must be overridden by concrete
    subclasses; the remaining methods provide safe no-op defaults.
    """

    def __init__(self, ctx, path):
        """Initialize abstractsubrepo part

        ``ctx`` is the context referring this subrepository in the
        parent repository.

        ``path`` is the path to this subrepository as seen from
        innermost repository.
        """
        self.ui = ctx.repo().ui
        self._ctx = ctx
        self._path = path

    def addwebdirpath(self, serverpath, webconf):
        """Add the hgwebdir entries for this subrepo, and any of its subrepos.

        ``serverpath`` is the path component of the URL for this repo.

        ``webconf`` is the dictionary of hgwebdir entries.
        """
        pass

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False, missing=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate. If missing
        is true, check for deleted files.
        """
        raise NotImplementedError

    def dirtyreason(self, ignoreupdate=False, missing=False):
        """return reason string if it is ``dirty()``

        Returned string should have enough information for the message
        of exception.

        This returns None, otherwise.
        """
        if self.dirty(ignoreupdate=ignoreupdate, missing=missing):
            return _('uncommitted changes in subrepository "%s"'
                     ) % subrelpath(self)

    def bailifchanged(self, ignoreupdate=False, hint=None):
        """raise Abort if subrepository is ``dirty()``
        """
        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate,
                                       missing=True)
        if dirtyreason:
            raise error.Abort(dirtyreason, hint=hint)

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        # non-hg subrepos have no phase concept; treat as public
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
        # default: nothing added
        return []

    def addremove(self, matcher, prefix, uipathfn, opts):
        # default: not supported; 1 signals a warning to the caller
        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, fm, fntemplate, prefix, **opts):
        return 1

    def status(self, rev2, **opts):
        # empty status tuple: no changes visible by default
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        pass

    def outgoing(self, ui, dest, opts):
        return 1

    def incoming(self, ui, source, opts):
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name, decode):
        """return file data, optionally passed through repo decoders"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def matchfileset(self, expr, badfn=None):
        """Resolve the fileset expression for this repo"""
        return matchmod.never(badfn=badfn)

    def printfiles(self, ui, m, fm, fmt, subrepos):
        """handle the files command for this subrepo"""
        return 1

    def archive(self, archiver, prefix, match=None, decode=True):
        """Add this subrepo's files to ``archiver``; returns the file count."""
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        progress = self.ui.makeprogress(_('archiving (%s)') % relpath,
                                        unit=_('files'), total=total)
        progress.update(0)
        for name in files:
            flags = self.fileflags(name)
            # executable files get 0o755, everything else 0o644
            mode = 'x' in flags and 0o755 or 0o644
            symlink = 'l' in flags
            archiver.addfile(prefix + name, mode, symlink,
                             self.filedata(name, decode))
            progress.increment()
        progress.complete()
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''

    def forget(self, match, prefix, uipathfn, dryrun, interactive):
        # default: nothing forgotten
        return ([], [])

    def removefiles(self, matcher, prefix, uipathfn, after, force, subrepos,
                    dryrun, warnings):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        warnings.append(_("warning: removefiles not implemented (%s)")
                        % self._path)
        return 1

    def revert(self, substate, *pats, **opts):
        self.ui.warn(_('%s: reverting %s subrepos is unsupported\n') \
                     % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        return revid

    def unshare(self):
        '''
        convert this repository from shared to normal storage.
        '''

    def verify(self):
        '''verify the integrity of the repository. Return 0 on success or
        warning, 1 on any error.
        '''
        return 0

    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository
        """
        return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
401 401
402 402 class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        """Create/open the subrepo's repository rooted under the parent.

        ``state`` is the (source, revision) pair from .hgsubstate;
        ``allowcreate`` permits creating the repository on disk when absent.
        """
        super(hgsubrepo, self).__init__(ctx, path)
        self._state = state
        r = ctx.repo()
        # translate the always-'/'-separated subrepo path to local OS form
        root = r.wjoin(util.localpath(path))
        create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
        # repository constructor does expand variables in path, which is
        # unsafe since subrepo path might come from untrusted source.
        if os.path.realpath(util.expandpath(root)) != root:
            raise error.Abort(_('subrepo path contains illegal component: %s')
                              % path)
        self._repo = hg.repository(r.baseui, root, create=create)
        if self._repo.root != root:
            raise error.ProgrammingError('failed to reject unsafe subrepo '
                                         'path: %s (expanded to %s)'
                                         % (root, self._repo.root))

        # Propagate the parent's --hidden option
        if r is r.unfiltered():
            self._repo = self._repo.unfiltered()

        self.ui = self._repo.ui
        # forward selected parent config into the subrepo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # internal config: ui._usedassubrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)
432 432
433 433 @annotatesubrepoerror
434 434 def addwebdirpath(self, serverpath, webconf):
435 435 cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)
436 436
    def storeclean(self, path):
        """Return True if the store is unchanged since the last clone from,
        pull from, or push to ``path`` (checked under the repo lock)."""
        with self._repo.lock():
            return self._storeclean(path)
440 440
441 441 def _storeclean(self, path):
442 442 clean = True
443 443 itercache = self._calcstorehash(path)
444 444 for filehash in self._readstorehashcache(path):
445 445 if filehash != next(itercache, None):
446 446 clean = False
447 447 break
448 448 if clean:
449 449 # if not empty:
450 450 # the cached and current pull states have a different size
451 451 clean = next(itercache, None) is None
452 452 return clean
453 453
454 454 def _calcstorehash(self, remotepath):
455 455 '''calculate a unique "store hash"
456 456
457 457 This method is used to to detect when there are changes that may
458 458 require a push to a given remote path.'''
459 459 # sort the files that will be hashed in increasing (likely) file size
460 460 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
461 461 yield '# %s\n' % _expandedabspath(remotepath)
462 462 vfs = self._repo.vfs
463 463 for relname in filelist:
464 464 filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
465 465 yield '%s = %s\n' % (relname, filehash)
466 466
467 467 @propertycache
468 468 def _cachestorehashvfs(self):
469 469 return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))
470 470
471 471 def _readstorehashcache(self, remotepath):
472 472 '''read the store hash cache for a given remote repository'''
473 473 cachefile = _getstorehashcachename(remotepath)
474 474 return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
475 475
476 476 def _cachestorehash(self, remotepath):
477 477 '''cache the current store hash
478 478
479 479 Each remote repo requires its own store hash cache, because a subrepo
480 480 store may be "clean" versus a given remote repo, but not versus another
481 481 '''
482 482 cachefile = _getstorehashcachename(remotepath)
483 483 with self._repo.lock():
484 484 storehash = list(self._calcstorehash(remotepath))
485 485 vfs = self._cachestorehashvfs
486 486 vfs.writelines(cachefile, storehash, mode='wb', notindexed=True)
487 487
488 488 def _getctx(self):
489 489 '''fetch the context for this subrepo revision, possibly a workingctx
490 490 '''
491 491 if self._ctx.rev() is None:
492 492 return self._repo[None] # workingctx if parent is workingctx
493 493 else:
494 494 rev = self._state[1]
495 495 return self._repo[rev]
496 496
497 497 @annotatesubrepoerror
498 498 def _initrepo(self, parentrepo, source, create):
499 499 self._repo._subparent = parentrepo
500 500 self._repo._subsource = source
501 501
502 502 if create:
503 503 lines = ['[paths]\n']
504 504
505 505 def addpathconfig(key, value):
506 506 if value:
507 507 lines.append('%s = %s\n' % (key, value))
508 508 self.ui.setconfig('paths', key, value, 'subrepo')
509 509
510 510 defpath = _abssource(self._repo, abort=False)
511 511 defpushpath = _abssource(self._repo, True, abort=False)
512 512 addpathconfig('default', defpath)
513 513 if defpath != defpushpath:
514 514 addpathconfig('default-push', defpushpath)
515 515
516 516 self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines)))
517 517
518 518 @annotatesubrepoerror
519 519 def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
520 520 return cmdutil.add(ui, self._repo, match, prefix, uipathfn,
521 521 explicitonly, **opts)
522 522
523 523 @annotatesubrepoerror
524 524 def addremove(self, m, prefix, uipathfn, opts):
525 525 # In the same way as sub directories are processed, once in a subrepo,
526 526 # always entry any of its subrepos. Don't corrupt the options that will
527 527 # be used to process sibling subrepos however.
528 528 opts = copy.copy(opts)
529 529 opts['subrepos'] = True
530 530 return scmutil.addremove(self._repo, m, prefix, uipathfn, opts)
531 531
532 532 @annotatesubrepoerror
533 533 def cat(self, match, fm, fntemplate, prefix, **opts):
534 534 rev = self._state[1]
535 535 ctx = self._repo[rev]
536 536 return cmdutil.cat(self.ui, self._repo, ctx, match, fm, fntemplate,
537 537 prefix, **opts)
538 538
539 539 @annotatesubrepoerror
540 540 def status(self, rev2, **opts):
541 541 try:
542 542 rev1 = self._state[1]
543 543 ctx1 = self._repo[rev1]
544 544 ctx2 = self._repo[rev2]
545 545 return self._repo.status(ctx1, ctx2, **opts)
546 546 except error.RepoLookupError as inst:
547 547 self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
548 548 % (inst, subrelpath(self)))
549 549 return scmutil.status([], [], [], [], [], [], [])
550 550
551 551 @annotatesubrepoerror
552 552 def diff(self, ui, diffopts, node2, match, prefix, **opts):
553 553 try:
554 554 node1 = node.bin(self._state[1])
555 555 # We currently expect node2 to come from substate and be
556 556 # in hex format
557 557 if node2 is not None:
558 558 node2 = node.bin(node2)
559 559 logcmdutil.diffordiffstat(ui, self._repo, diffopts, node1, node2,
560 560 match, prefix=prefix, listsubrepos=True,
561 561 **opts)
562 562 except error.RepoLookupError as inst:
563 563 self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
564 564 % (inst, subrelpath(self)))
565 565
566 566 @annotatesubrepoerror
567 567 def archive(self, archiver, prefix, match=None, decode=True):
568 568 self._get(self._state + ('hg',))
569 569 files = self.files()
570 570 if match:
571 571 files = [f for f in files if match(f)]
572 572 rev = self._state[1]
573 573 ctx = self._repo[rev]
574 574 scmutil.prefetchfiles(self._repo, [ctx.rev()],
575 575 scmutil.matchfiles(self._repo, files))
576 576 total = abstractsubrepo.archive(self, archiver, prefix, match)
577 577 for subpath in ctx.substate:
578 578 s = subrepo(ctx, subpath, True)
579 579 submatch = matchmod.subdirmatcher(subpath, match)
580 580 subprefix = prefix + subpath + '/'
581 581 total += s.archive(archiver, subprefix, submatch,
582 582 decode)
583 583 return total
584 584
585 585 @annotatesubrepoerror
586 586 def dirty(self, ignoreupdate=False, missing=False):
587 587 r = self._state[1]
588 588 if r == '' and not ignoreupdate: # no state recorded
589 589 return True
590 590 w = self._repo[None]
591 591 if r != w.p1().hex() and not ignoreupdate:
592 592 # different version checked out
593 593 return True
594 594 return w.dirty(missing=missing) # working directory changed
595 595
596 596 def basestate(self):
597 597 return self._repo['.'].hex()
598 598
599 599 def checknested(self, path):
600 600 return self._repo._checknested(self._repo.wjoin(path))
601 601
602 602 @annotatesubrepoerror
603 603 def commit(self, text, user, date):
604 604 # don't bother committing in the subrepo if it's only been
605 605 # updated
606 606 if not self.dirty(True):
607 607 return self._repo['.'].hex()
608 608 self.ui.debug("committing subrepo %s\n" % subrelpath(self))
609 609 n = self._repo.commit(text, user, date)
610 610 if not n:
611 611 return self._repo['.'].hex() # different version checked out
612 612 return node.hex(n)
613 613
614 614 @annotatesubrepoerror
615 615 def phase(self, state):
616 616 return self._repo[state or '.'].phase()
617 617
618 618 @annotatesubrepoerror
619 619 def remove(self):
620 620 # we can't fully delete the repository as it may contain
621 621 # local-only history
622 622 self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
623 623 hg.clean(self._repo, node.nullid, False)
624 624
    def _get(self, state):
        """Ensure ``state``'s revision exists locally, sharing/cloning or
        pulling as needed.

        Returns True when the revision was already present (and the repo is
        non-empty or unshared); False when a share/clone/pull was performed.
        """
        source, revision, kind = state
        parentrepo = self._repo._subparent

        if revision in self._repo.unfiltered():
            # Allow shared subrepos tracked at null to setup the sharedpath
            if len(self._repo) != 0 or not parentrepo.shared():
                return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)

        # Defer creating the peer until after the status message is logged, in
        # case there are network problems.
        getpeer = lambda: hg.peer(self._repo, {}, srcurl)

        if len(self._repo) == 0:
            # use self._repo.vfs instead of self.wvfs to remove .hg only
            self._repo.vfs.rmtree()

            # A remote subrepo could be shared if there is a local copy
            # relative to the parent's share source. But clone pooling doesn't
            # assemble the repos in a tree, so that can't be consistently done.
            # A simpler option is for the user to configure clone pooling, and
            # work with that.
            if parentrepo.shared() and hg.islocal(srcurl):
                self.ui.status(_('sharing subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                shared = hg.share(self._repo._subparent.baseui,
                                  getpeer(), self._repo.root,
                                  update=False, bookmarks=False)
                self._repo = shared.local()
            else:
                # TODO: find a common place for this and this code in the
                # share.py wrap of the clone command.
                if parentrepo.shared():
                    pool = self.ui.config('share', 'pool')
                    if pool:
                        pool = util.expandpath(pool)

                    shareopts = {
                        'pool': pool,
                        'mode': self.ui.config('share', 'poolnaming'),
                    }
                else:
                    shareopts = {}

                self.ui.status(_('cloning subrepo %s from %s\n')
                               % (subrelpath(self), util.hidepassword(srcurl)))
                other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                         getpeer(), self._repo.root,
                                         update=False, shareopts=shareopts)
                self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), util.hidepassword(srcurl)))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, getpeer())
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False
688 688
689 689 @annotatesubrepoerror
690 690 def get(self, state, overwrite=False):
691 691 inrepo = self._get(state)
692 692 source, revision, kind = state
693 693 repo = self._repo
694 694 repo.ui.debug("getting subrepo %s\n" % self._path)
695 695 if inrepo:
696 696 urepo = repo.unfiltered()
697 697 ctx = urepo[revision]
698 698 if ctx.hidden():
699 699 urepo.ui.warn(
700 700 _('revision %s in subrepository "%s" is hidden\n') \
701 701 % (revision[0:12], self._path))
702 702 repo = urepo
703 703 hg.updaterepo(repo, revision, overwrite)
704 704
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the working copy with the revision recorded in ``state``.

        Fast-forwards (ancestor == current, same branch) update instead of
        merging; already-contained revisions (ancestor == destination) are
        skipped. A dirty working copy triggers a local/remote prompt first.
        """
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                self.ui.debug('updating subrepository "%s"\n'
                              % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self.ui.debug('skipping subrepository "%s"\n'
                              % subrelpath(self))
            else:
                self.ui.debug('merging subrepository "%s"\n' % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # only merge if the user chooses the (r)emote source
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()
733 733
734 734 @annotatesubrepoerror
735 735 def push(self, opts):
736 736 force = opts.get('force')
737 737 newbranch = opts.get('new_branch')
738 738 ssh = opts.get('ssh')
739 739
740 740 # push subrepos depth-first for coherent ordering
741 741 c = self._repo['.']
742 742 subs = c.substate # only repos that are committed
743 743 for s in sorted(subs):
744 744 if c.sub(s).push(opts) == 0:
745 745 return False
746 746
747 747 dsturl = _abssource(self._repo, True)
748 748 if not force:
749 749 if self.storeclean(dsturl):
750 750 self.ui.status(
751 751 _('no changes made to subrepo %s since last push to %s\n')
752 752 % (subrelpath(self), util.hidepassword(dsturl)))
753 753 return None
754 754 self.ui.status(_('pushing subrepo %s to %s\n') %
755 755 (subrelpath(self), util.hidepassword(dsturl)))
756 756 other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
757 757 res = exchange.push(self._repo, other, force, newbranch=newbranch)
758 758
759 759 # the repo is now clean
760 760 self._cachestorehash(dsturl)
761 761 return res.cgresult
762 762
763 763 @annotatesubrepoerror
764 764 def outgoing(self, ui, dest, opts):
765 765 if 'rev' in opts or 'branch' in opts:
766 766 opts = copy.copy(opts)
767 767 opts.pop('rev', None)
768 768 opts.pop('branch', None)
769 769 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
770 770
771 771 @annotatesubrepoerror
772 772 def incoming(self, ui, source, opts):
773 773 if 'rev' in opts or 'branch' in opts:
774 774 opts = copy.copy(opts)
775 775 opts.pop('rev', None)
776 776 opts.pop('branch', None)
777 777 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
778 778
779 779 @annotatesubrepoerror
780 780 def files(self):
781 781 rev = self._state[1]
782 782 ctx = self._repo[rev]
783 783 return ctx.manifest().keys()
784 784
785 785 def filedata(self, name, decode):
786 786 rev = self._state[1]
787 787 data = self._repo[rev][name].data()
788 788 if decode:
789 789 data = self._repo.wwritedata(name, data)
790 790 return data
791 791
792 792 def fileflags(self, name):
793 793 rev = self._state[1]
794 794 ctx = self._repo[rev]
795 795 return ctx.flags(name)
796 796
797 797 @annotatesubrepoerror
798 798 def printfiles(self, ui, m, fm, fmt, subrepos):
799 799 # If the parent context is a workingctx, use the workingctx here for
800 800 # consistency.
801 801 if self._ctx.rev() is None:
802 802 ctx = self._repo[None]
803 803 else:
804 804 rev = self._state[1]
805 805 ctx = self._repo[rev]
806 806 return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)
807 807
808 808 @annotatesubrepoerror
809 809 def matchfileset(self, expr, badfn=None):
810 810 if self._ctx.rev() is None:
811 811 ctx = self._repo[None]
812 812 else:
813 813 rev = self._state[1]
814 814 ctx = self._repo[rev]
815 815
816 816 matchers = [ctx.matchfileset(expr, badfn=badfn)]
817 817
818 818 for subpath in ctx.substate:
819 819 sub = ctx.sub(subpath)
820 820
821 821 try:
822 822 sm = sub.matchfileset(expr, badfn=badfn)
823 823 pm = matchmod.prefixdirmatcher(subpath, sm, badfn=badfn)
824 824 matchers.append(pm)
825 825 except error.LookupError:
826 826 self.ui.status(_("skipping missing subrepository: %s\n")
827 827 % self.wvfs.reljoin(reporelpath(self), subpath))
828 828 if len(matchers) == 1:
829 829 return matchers[0]
830 830 return matchmod.unionmatcher(matchers)
831 831
832 832 def walk(self, match):
833 833 ctx = self._repo[None]
834 834 return ctx.walk(match)
835 835
836 836 @annotatesubrepoerror
837 837 def forget(self, match, prefix, uipathfn, dryrun, interactive):
838 838 return cmdutil.forget(self.ui, self._repo, match, prefix, uipathfn,
839 839 True, dryrun=dryrun, interactive=interactive)
840 840
841 841 @annotatesubrepoerror
842 842 def removefiles(self, matcher, prefix, uipathfn, after, force, subrepos,
843 843 dryrun, warnings):
844 844 return cmdutil.remove(self.ui, self._repo, matcher, prefix, uipathfn,
845 845 after, force, subrepos, dryrun)
846 846
    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        """revert the subrepo's files and update it to substate's revision

        ``substate[0]`` is shown to the user and ``substate[1]`` is the
        revision reverted/updated to.  Honors the ``no_backup`` and
        ``dry_run`` entries in ``opts``.
        """
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get(r'no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts[r'date'] = None
            opts[r'rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get(r'dry_run'):
            self.get(substate, overwrite=True)
868 868
    def filerevert(self, *pats, **opts):
        """run cmdutil.revert against the revision in opts[r'rev']

        NOTE(review): the incoming ``pats`` are always replaced here —
        with the 'all' option every modified file is reverted, otherwise
        an empty pattern list is passed — confirm intended.
        """
        ctx = self._repo[opts[r'rev']]
        parents = self._repo.dirstate.parents()
        if opts.get(r'all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
877 877
878 878 def shortid(self, revid):
879 879 return revid[:12]
880 880
    @annotatesubrepoerror
    def unshare(self):
        """turn a shared subrepo into a standalone repository"""
        # subrepo inherently violates our import layering rules
        # because it wants to make repo objects from deep inside the stack
        # so we manually delay the circular imports to not break
        # scripts that don't use our demand-loading
        global hg
        from . import hg as h
        hg = h

        # Nothing prevents a user from sharing in a repo, and then making that a
        # subrepo. Alternately, the previous unshare attempt may have failed
        # part way through. So recurse whether or not this layer is shared.
        if self._repo.shared():
            self.ui.status(_("unsharing subrepo '%s'\n") % self._relpath)

        hg.unshare(self.ui, self._repo)
898 898
    def verify(self):
        """check that the revision recorded for this subrepo exists

        Always returns 0; a hidden or missing revision only triggers a
        warning, since a missing one may simply need to be pulled.
        """
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0
916 916
917 917 @propertycache
918 918 def wvfs(self):
919 919 """return own wvfs for efficiency and consistency
920 920 """
921 921 return self._repo.wvfs
922 922
923 923 @propertycache
924 924 def _relpath(self):
925 925 """return path to this subrepository as seen from outermost repository
926 926 """
927 927 # Keep consistent dir separators by avoiding vfs.join(self._path)
928 928 return reporelpath(self._repo)
929 929
class svnsubrepo(abstractsubrepo):
    """subrepository backed by a Subversion working copy

    All operations shell out to the ``svn`` command line client;
    structured data is obtained by parsing its ``--xml`` output.
    """
    def __init__(self, ctx, path, state, allowcreate):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        self._exe = procutil.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """run svn with ``commands`` and return (stdout, stderr)

        The working copy path joined with ``filename`` is appended to the
        command line unless ``filename`` is None.  Unless ``failok`` is
        set, a nonzero exit status raises Abort and any stderr is printed
        as a warning.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw[r'stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(encoding.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(pycompat.rapply(procutil.tonativestr, cmd),
                             bufsize=-1, close_fds=procutil.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             env=procutil.tonativeenv(env), **extrakw)
        stdout, stderr = map(util.fromnativeeol, p.communicate())
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        """the svn client version as a (major, minor) tuple"""
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(br'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _svnmissing(self):
        """True when the working copy has never been checked out"""
        return not self.wvfs.exists('.svn')

    def _wcrevs(self):
        """return (last committed revision, working directory revision)"""
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName(r'entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = pycompat.bytestr(entries[0].getAttribute(r'revision')) or '0'
            commits = entries[0].getElementsByTagName(r'commit')
            if commits:
                lastrev = pycompat.bytestr(
                    commits[0].getAttribute(r'revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        """the last committed revision of the working copy"""
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName(r'entry'):
            s = e.getElementsByTagName(r'wc-status')
            if not s:
                continue
            item = s[0].getAttribute(r'item')
            props = s[0].getAttribute(r'props')
            path = e.getAttribute(r'path').encode('utf8')
            if item == r'external':
                externals.append(path)
            elif item == r'missing':
                missing.append(path)
            if (item not in (r'', r'normal', r'unversioned', r'external')
                or props not in (r'', r'none', r'normal')):
                changes.append(path)
        # a change under an external counts as an external change
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + pycompat.ossep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        """True when the working copy differs from the recorded state"""
        if self._svnmissing():
            return self._state[1] != ''
        wcchanged = self._wcchanged()
        changed = wcchanged[0] or (missing and wcchanged[2])
        if not changed:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        """the revision to record in the parent repository's state"""
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        """commit local changes and return the new revision number

        ``user`` and ``date`` are ignored because svn assigns them itself.
        """
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise error.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise error.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        # NOTE(review): the trailing '.' is an unescaped regex wildcard
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise error.Abort(_('failed to commit svn changes'))
            raise error.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        """delete the working copy, unless it has local changes"""
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            pwvfs = self._ctx.repo().wvfs
            pwvfs.removedirs(pwvfs.dirname(self._path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """check out ``state``'s URL@revision into the working copy

        With ``overwrite``, local modifications are reverted first.  An
        obstructed but otherwise clean working copy is blown away and
        checked out again.
        """
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))

        # SEC: check that the ssh url is safe
        util.checksafessh(state[0])

        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self.wvfs, '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise error.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        """update to ``state``'s revision, prompting when dirty"""
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        """no-op: svn commits publish changes directly"""
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        """list all versioned files in the working copy"""
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName(r'entry'):
            kind = pycompat.bytestr(e.getAttribute(r'kind'))
            if kind != 'file':
                continue
            name = r''.join(c.data for c
                            in e.getElementsByTagName(r'name')[0].childNodes
                            if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf8'))
        return paths

    def filedata(self, name, decode):
        """return the content of ``name`` via ``svn cat``

        NOTE(review): ``decode`` is accepted for interface compatibility
        but has no effect here.
        """
        return self._svncommand(['cat'], name)[0]
1162 1162
1163 1163
1164 1164 class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        """store the recorded state and eagerly locate a usable git
        executable via _ensuregit()"""
        super(gitsubrepo, self).__init__(ctx, path)
        self._state = state
        self._abspath = ctx.repo().wjoin(path)
        self._subparent = ctx.repo()
        self._ensuregit()
1171 1171
    def _ensuregit(self):
        """locate a working git executable or abort

        Tries ``git`` first; on Windows, falls back to ``git.cmd``.
        Warns or aborts depending on the detected version.
        """
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError as e:
            genericerror = _("error executing git for subrepo '%s': %s")
            notfoundhint = _("check git is installed and in your PATH")
            if e.errno != errno.ENOENT:
                raise error.Abort(genericerror % (
                    self._path, encoding.strtolocal(e.strerror)))
            elif pycompat.iswindows:
                try:
                    self._gitexecutable = 'git.cmd'
                    out, err = self._gitnodir(['--version'])
                except OSError as e2:
                    if e2.errno == errno.ENOENT:
                        raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
                                            " for subrepo '%s'") % self._path,
                                          hint=notfoundhint)
                    else:
                        raise error.Abort(genericerror % (self._path,
                                          encoding.strtolocal(e2.strerror)))
            else:
                raise error.Abort(_("couldn't find git for subrepo '%s'")
                                  % self._path, hint=notfoundhint)
        versionstatus = self._checkversion(out)
        if versionstatus == 'unknown':
            self.ui.warn(_('cannot retrieve git version\n'))
        elif versionstatus == 'abort':
            raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif versionstatus == 'warning':
            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1204 1204
1205 1205 @staticmethod
1206 1206 def _gitversion(out):
1207 1207 m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out)
1208 1208 if m:
1209 1209 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1210 1210
1211 1211 m = re.search(br'^git version (\d+)\.(\d+)', out)
1212 1212 if m:
1213 1213 return (int(m.group(1)), int(m.group(2)), 0)
1214 1214
1215 1215 return -1
1216 1216
1217 1217 @staticmethod
1218 1218 def _checkversion(out):
1219 1219 '''ensure git version is new enough
1220 1220
1221 1221 >>> _checkversion = gitsubrepo._checkversion
1222 1222 >>> _checkversion(b'git version 1.6.0')
1223 1223 'ok'
1224 1224 >>> _checkversion(b'git version 1.8.5')
1225 1225 'ok'
1226 1226 >>> _checkversion(b'git version 1.4.0')
1227 1227 'abort'
1228 1228 >>> _checkversion(b'git version 1.5.0')
1229 1229 'warning'
1230 1230 >>> _checkversion(b'git version 1.9-rc0')
1231 1231 'ok'
1232 1232 >>> _checkversion(b'git version 1.9.0.265.g81cdec2')
1233 1233 'ok'
1234 1234 >>> _checkversion(b'git version 1.9.0.GIT')
1235 1235 'ok'
1236 1236 >>> _checkversion(b'git version 12345')
1237 1237 'unknown'
1238 1238 >>> _checkversion(b'no')
1239 1239 'unknown'
1240 1240 '''
1241 1241 version = gitsubrepo._gitversion(out)
1242 1242 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1243 1243 # despite the docstring comment. For now, error on 1.4.0, warn on
1244 1244 # 1.5.0 but attempt to continue.
1245 1245 if version == -1:
1246 1246 return 'unknown'
1247 1247 if version < (1, 5, 0):
1248 1248 return 'abort'
1249 1249 elif version < (1, 6, 0):
1250 1250 return 'warning'
1251 1251 return 'ok'
1252 1252
1253 1253 def _gitcommand(self, commands, env=None, stream=False):
1254 1254 return self._gitdir(commands, env=env, stream=stream)[0]
1255 1255
    def _gitdir(self, commands, env=None, stream=False):
        """run git with the subrepo checkout as the working directory"""
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)
1259 1259
    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Calls the git command

        The methods tries to call the git command. versions prior to 1.6.0
        are not supported and very probably fail.

        Returns (stdout, returncode), or (stdout pipe, None) when
        ``stream`` is set.  Exit codes other than 0 and 1 abort, except
        for 'cat-file' and 'symbolic-ref'.
        """
        self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        if env is None:
            env = encoding.environ.copy()
        # disable localization for Git output (issue5176)
        env['LC_ALL'] = 'C'
        # fix for Git CVE-2015-7545
        if 'GIT_ALLOW_PROTOCOL' not in env:
            env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self.ui.quiet:
            errpipe = open(os.devnull, 'w')
        if self.ui._colormode and len(commands) and commands[0] == "diff":
            # insert the argument in the front,
            # the end of git diff arguments is used for paths
            commands.insert(1, '--color')
        p = subprocess.Popen(pycompat.rapply(procutil.tonativestr,
                                             [self._gitexecutable] + commands),
                             bufsize=-1,
                             cwd=pycompat.rapply(procutil.tonativestr, cwd),
                             env=procutil.tonativeenv(env),
                             close_fds=procutil.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise error.Abort(_('git %s error %d in %s') %
                              (command, p.returncode, self._relpath))

        return retdata, p.returncode
1307 1307
    def _gitmissing(self):
        """True when the subrepo has never been checked out (no .git)"""
        return not self.wvfs.exists('.git')
1310 1310
    def _gitstate(self):
        """the hash of the current git HEAD (via rev-parse)"""
        return self._gitcommand(['rev-parse', 'HEAD'])
1313 1313
1314 1314 def _gitcurrentbranch(self):
1315 1315 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1316 1316 if err:
1317 1317 current = None
1318 1318 return current
1319 1319
1320 1320 def _gitremote(self, remote):
1321 1321 out = self._gitcommand(['remote', 'show', '-n', remote])
1322 1322 line = out.split('\n')[1]
1323 1323 i = line.index('URL: ') + len('URL: ')
1324 1324 return line[i:]
1325 1325
1326 1326 def _githavelocally(self, revision):
1327 1327 out, code = self._gitdir(['cat-file', '-e', revision])
1328 1328 return code == 0
1329 1329
1330 1330 def _gitisancestor(self, r1, r2):
1331 1331 base = self._gitcommand(['merge-base', r1, r2])
1332 1332 return base == r1
1333 1333
    def _gitisbare(self):
        """True when core.bare is set on the subrepo"""
        return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1336 1336
    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat."""
        self._gitcommand(['update-index', '-q', '--refresh'])
1342 1342
1343 1343 def _gitbranchmap(self):
1344 1344 '''returns 2 things:
1345 1345 a map from git branch to revision
1346 1346 a map from revision to branches'''
1347 1347 branch2rev = {}
1348 1348 rev2branch = {}
1349 1349
1350 1350 out = self._gitcommand(['for-each-ref', '--format',
1351 1351 '%(objectname) %(refname)'])
1352 1352 for line in out.split('\n'):
1353 1353 revision, ref = line.split(' ')
1354 1354 if (not ref.startswith('refs/heads/') and
1355 1355 not ref.startswith('refs/remotes/')):
1356 1356 continue
1357 1357 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1358 1358 continue # ignore remote/HEAD redirects
1359 1359 branch2rev[ref] = revision
1360 1360 rev2branch.setdefault(revision, []).append(ref)
1361 1361 return branch2rev, rev2branch
1362 1362
    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            if b.startswith('refs/remotes/'):
                continue
            # branch name without the 'refs/heads/' prefix
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking
1377 1377
    def _abssource(self, source):
        """make ``source`` absolute against the parent repo when relative"""
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)
1386 1386
    def _fetch(self, source, revision):
        """make sure ``revision`` is available locally

        Clones from ``source`` when the subrepo is missing, otherwise
        fetches from origin; aborts when the revision still cannot be
        found.
        """
        if self._gitmissing():
            # SEC: check for safe ssh url
            util.checksafessh(source)

            source = self._abssource(source)
            self.ui.status(_('cloning subrepo %s from %s\n') %
                           (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self.ui.status(_('pulling subrepo %s from %s\n') %
                       (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise error.Abort(_('revision %s does not exist in subrepository '
                                '"%s"\n') % (revision, self._relpath))
1405 1405
    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        """True when the checkout differs from the recorded state

        A missing checkout is dirty only when a revision is recorded; a
        bare repo is always dirty.  NOTE(review): ``missing`` is accepted
        for interface compatibility but is not consulted here.
        """
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1
1419 1419
    def basestate(self):
        """the state to record in the parent: the current HEAD hash"""
        return self._gitstate()
1422 1422
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """update the checkout to ``state``'s revision

        Fetches the revision if absent, then checks it out, preferring an
        existing local branch (master first) over a detached HEAD.  With
        ``overwrite``, staged and local modifications are discarded first.
        """
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run 'git checkout', forcing when overwrite is requested
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self.wvfs, '.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(_('checking out detached HEAD in '
                           'subrepository "%s"\n') % self._relpath)
            self.ui.warn(_('check out a git branch if you intend '
                           'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
            _sanitize(self.ui, self.wvfs, '.git')
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
1508 1508
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """commit all changes (``git commit -a``) and return the new HEAD"""
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = encoding.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = dateutil.datestr(date,
                                                      '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works otherwise HEAD might not exist under certain
        # circumstances
        return self._gitstate()
1526 1526
    @annotatesubrepoerror
    def merge(self, state):
        """merge the revision in ``state`` into the checkout

        Fast-forwards via get() when possible; otherwise runs
        ``git merge --no-commit``.  A dirty checkout prompts the user
        first.
        """
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
                _sanitize(self.ui, self.wvfs, '.git')

        if self.dirty():
            if self._gitstate() != revision:
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self.ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()
1550 1550
    @annotatesubrepoerror
    def push(self, opts):
        """push the recorded revision to origin when not already there

        Returns True when nothing needed pushing or the push succeeded,
        False when no suitable branch is checked out.
        """
        force = opts.get('force')

        if not self._state[1]:
            return True
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self.ui.warn(_('unrelated git branch checked out '
                               'in subrepository "%s"\n') % self._relpath)
                return False
            self.ui.status(_('pushing branch %s of subrepository "%s"\n') %
                           (current.split('/', 2)[2], self._relpath))
            ret = self._gitdir(cmd + ['origin', current])
            return ret[1] == 0
        else:
            self.ui.warn(_('no branch checked out in subrepository "%s"\n'
                           'cannot push revision %s\n') %
                         (self._relpath, self._state[1]))
            return False
1590 1590
    @annotatesubrepoerror
    def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
        """add matched files to git; return the list of files rejected
        because they are already tracked

        NOTE(review): ``explicitonly`` is accepted for interface
        compatibility but is not consulted here.
        """
        if self._gitmissing():
            return []

        s = self.status(None, unknown=True, clean=True)

        tracked = set()
        # dirstates 'amn' warn, 'r' is added again
        for l in (s.modified, s.added, s.deleted, s.clean):
            tracked.update(l)

        # Unknown files not of interest will be rejected by the matcher
        files = s.unknown
        files.extend(match.files())

        rejected = []

        files = [f for f in sorted(set(files)) if match(f)]
        for f in files:
            exact = match.exact(f)
            command = ["add"]
            if exact:
                command.append("-f") #should be added, even if ignored
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % uipathfn(f))

            if f in tracked:  # hg prints 'adding' even if already tracked
                if exact:
                    rejected.append(f)
                continue
            if not opts.get(r'dry_run'):
                self._gitcommand(command + [f])

        for f in rejected:
            ui.warn(_("%s already tracked!\n") % uipathfn(f))

        return rejected
1629 1629
    @annotatesubrepoerror
    def remove(self):
        """empty the working directory, keeping .git as a bare repo

        Nothing is removed when there are local changes; the repository
        itself is preserved because it may contain local-only history.
        """
        if self._gitmissing():
            return
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._relpath)
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % self._relpath)
        self._gitcommand(['config', 'core.bare', 'true'])
        for f, kind in self.wvfs.readdir():
            if f == '.git':
                continue
            if kind == stat.S_IFDIR:
                self.wvfs.rmtree(f)
            else:
                self.wvfs.unlink(f)
1649 1649
    def archive(self, archiver, prefix, match=None, decode=True):
        """feed the subrepo's files at the recorded revision to ``archiver``

        Streams ``git archive`` output as a tar and adds every regular
        file and symlink; returns the number of files archived.
        NOTE(review): ``decode`` is accepted for interface compatibility
        but is not used here.
        """
        total = 0
        source, revision = self._state
        if not revision:
            return total
        self._fetch(source, revision)

        # Parse git's native archive command.
        # This should be much faster than manually traversing the trees
        # and objects with many subprocess calls.
        tarstream = self._gitcommand(['archive', revision], stream=True)
        tar = tarfile.open(fileobj=tarstream, mode=r'r|')
        relpath = subrelpath(self)
        progress = self.ui.makeprogress(_('archiving (%s)') % relpath,
                                        unit=_('files'))
        progress.update(0)
        for info in tar:
            if info.isdir():
                continue
            bname = pycompat.fsencode(info.name)
            if match and not match(bname):
                continue
            if info.issym():
                data = info.linkname
            else:
                data = tar.extractfile(info).read()
            archiver.addfile(prefix + bname, info.mode, info.issym(), data)
            total += 1
            progress.increment()
        progress.complete()
        return total
1681 1681
1682 1682
@annotatesubrepoerror
def cat(self, match, fm, fntemplate, prefix, **opts):
    """Dump the matched files at the pinned revision.

    Returns 0 on success, 1 when the match cannot be honored.
    """
    rev = self._state[1]
    # Include/exclude patterns are not supported yet, and an empty
    # explicit file list leaves nothing to do.
    if match.anypats() or not match.files():
        return 1

    # TODO: add support for non-plain formatter (see cmdutil.cat())
    for name in match.files():
        contents = self._gitcommand(["show", "%s:%s" % (rev, name)])
        fp = cmdutil.makefileobj(self._ctx, fntemplate,
                                 pathname=self.wvfs.reljoin(prefix, name))
        fp.write(contents)
        fp.close()
    return 0
1700 1700
1701 1701
@annotatesubrepoerror
def status(self, rev2, **opts):
    # Return a scmutil.status tuple describing the subrepo between its
    # recorded revision and rev2 (or the working directory when rev2 is
    # falsy), built from 'git diff-tree'/'diff-index' plus
    # 'git status --porcelain -z'.
    rev1 = self._state[1]
    if self._gitmissing() or not rev1:
        # if the repo is missing, return no results
        return scmutil.status([], [], [], [], [], [], [])
    modified, added, removed = [], [], []
    self._gitupdatestat()
    if rev2:
        command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
    else:
        command = ['diff-index', '--no-renames', rev1]
    out = self._gitcommand(command)
    for line in out.split('\n'):
        tab = line.find('\t')
        if tab == -1:
            continue
        # the single-letter git status code immediately precedes the tab
        status, f = line[tab - 1:tab], line[tab + 1:]
        if status == 'M':
            modified.append(f)
        elif status == 'A':
            added.append(f)
        elif status == 'D':
            removed.append(f)

    deleted, unknown, ignored, clean = [], [], [], []

    # porcelain -z output is NUL-separated, which is safe for any
    # filename git can store
    command = ['status', '--porcelain', '-z']
    if opts.get(r'unknown'):
        command += ['--untracked-files=all']
    if opts.get(r'ignored'):
        command += ['--ignored']
    out = self._gitcommand(command)

    changedfiles = set()
    changedfiles.update(modified)
    changedfiles.update(added)
    changedfiles.update(removed)
    for line in out.split('\0'):
        if not line:
            continue
        st = line[0:2]
        #moves and copies show 2 files on one line
        # NOTE(review): out was already split on '\0' above, so this
        # branch looks unreachable and filename2 stays None -- verify
        # rename/copy handling (diff commands pass --no-renames, but
        # 'status' here does not).
        if line.find('\0') >= 0:
            filename1, filename2 = line[3:].split('\0')
        else:
            filename1 = line[3:]
            filename2 = None

        changedfiles.add(filename1)
        if filename2:
            changedfiles.add(filename2)

        if st == '??':
            unknown.append(filename1)
        elif st == '!!':
            ignored.append(filename1)

    if opts.get(r'clean'):
        # everything git tracks that we have not already classified as
        # changed is reported clean
        out = self._gitcommand(['ls-files'])
        for f in out.split('\n'):
            if not f in changedfiles:
                clean.append(f)

    return scmutil.status(modified, added, removed, deleted,
                          unknown, ignored, clean)
1768 1768
@annotatesubrepoerror
def diff(self, ui, diffopts, node2, match, prefix, **opts):
    """Write a git diff of the subrepo between its recorded revision
    and node2 (or the working directory when node2 is None) to ui.

    The relevant diffopts flags are mapped onto git command-line
    options; output is prefixed so paths line up with the parent repo.
    """
    node1 = self._state[1]
    cmd = ['diff', '--no-renames']
    if opts[r'stat']:
        cmd.append('--stat')
    else:
        # for Git, this also implies '-p'
        cmd.append('-U%d' % diffopts.context)

    if diffopts.noprefix:
        cmd.extend(['--src-prefix=%s/' % prefix,
                    '--dst-prefix=%s/' % prefix])
    else:
        cmd.extend(['--src-prefix=a/%s/' % prefix,
                    '--dst-prefix=b/%s/' % prefix])

    if diffopts.ignorews:
        cmd.append('--ignore-all-space')
    if diffopts.ignorewsamount:
        cmd.append('--ignore-space-change')
    # Test the cheap flag first so 'git --version' is only spawned when
    # the option is actually requested; the flag needs git >= 1.8.4.
    if diffopts.ignoreblanklines \
       and self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4):
        cmd.append('--ignore-blank-lines')

    cmd.append(node1)
    if node2:
        cmd.append(node2)

    output = ""
    if match.always():
        output += self._gitcommand(cmd) + '\n'
    else:
        # Restrict the diff to matched files among those status()
        # reports as modified/added/removed.
        st = self.status(node2)[:3]
        files = [f for sublist in st for f in sublist]
        for f in files:
            if match(f):
                output += self._gitcommand(cmd + ['--', f]) + '\n'

    if output.strip():
        ui.write(output)
1810 1810
@annotatesubrepoerror
def revert(self, substate, *pats, **opts):
    """Restore the subrepo to the state recorded in substate, backing
    up locally modified files first unless no_backup is set."""
    self.ui.status(_('reverting subrepo %s\n') % substate[0])
    if not opts.get(r'no_backup'):
        for modname in self.status(None).modified:
            # backuppath() expects a path relative to the parent repo (the
            # repo that ui.origbackuppath is relative to)
            parentname = os.path.join(self._path, modname)
            bakname = scmutil.backuppath(self.ui, self._subparent,
                                         parentname)
            self.ui.note(_('saving current version of %s as %s\n') %
                         (modname, os.path.relpath(bakname)))
            util.rename(self.wvfs.join(modname), bakname)

    if not opts.get(r'dry_run'):
        self.get(substate, overwrite=True)
    return []
1830 1830
def shortid(self, revid):
    """Return the abbreviated (7-character) form of a git revision id."""
    return revid[0:7]
1833 1833
# Registry mapping a subrepo kind name to its implementation class;
# looked up when instantiating a subrepo of that kind.
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
}
@@ -1,556 +1,556 b''
1 1 #require no-reposimplestore no-chg
2 2
3 3 Set up a server
4 4
5 5 $ hg init server
6 6 $ cd server
7 7 $ cat >> .hg/hgrc << EOF
8 8 > [extensions]
9 9 > clonebundles =
10 10 > EOF
11 11
12 12 $ touch foo
13 13 $ hg -q commit -A -m 'add foo'
14 14 $ touch bar
15 15 $ hg -q commit -A -m 'add bar'
16 16
17 17 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
18 18 $ cat hg.pid >> $DAEMON_PIDS
19 19 $ cd ..
20 20
21 21 Missing manifest should not result in server lookup
22 22
23 23 $ hg --verbose clone -U http://localhost:$HGPORT no-manifest
24 24 requesting all changes
25 25 adding changesets
26 26 adding manifests
27 27 adding file changes
28 28 added 2 changesets with 2 changes to 2 files
29 29 new changesets 53245c60e682:aaff8d2ffbbf
30 30 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
31 31
32 32 $ cat server/access.log
33 33 * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
34 34 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
35 35 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=aaff8d2ffbbf07a46dd1f05d8ae7877e3f56e2a2&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
36 36
37 37 Empty manifest file results in retrieval
38 38 (the extension only checks if the manifest file exists)
39 39
40 40 $ touch server/.hg/clonebundles.manifest
41 41 $ hg --verbose clone -U http://localhost:$HGPORT empty-manifest
42 42 no clone bundles available on remote; falling back to regular clone
43 43 requesting all changes
44 44 adding changesets
45 45 adding manifests
46 46 adding file changes
47 47 added 2 changesets with 2 changes to 2 files
48 48 new changesets 53245c60e682:aaff8d2ffbbf
49 49 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
50 50
51 51 Manifest file with invalid URL aborts
52 52
53 53 $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
54 54 $ hg clone http://localhost:$HGPORT 404-url
55 55 applying clone bundle from http://does.not.exist/bundle.hg
56 56 error fetching bundle: (.* not known|(\[Errno -?\d+])? No address associated with hostname) (re) (no-windows !)
57 57 error fetching bundle: [Errno 1100*] getaddrinfo failed (glob) (windows !)
58 58 abort: error applying bundle
59 59 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
60 60 [255]
61 61
62 62 Clone aborts when the server hosting the bundle is not running
63 63
64 64 $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
65 65 $ hg clone http://localhost:$HGPORT server-not-runner
66 66 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$) (re)
67 error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$|.* No route to host) (re)
68 68 abort: error applying bundle
69 69 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
70 70 [255]
71 71
72 72 Server returns 404
73 73
74 74 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
75 75 $ cat http.pid >> $DAEMON_PIDS
76 76 $ hg clone http://localhost:$HGPORT running-404
77 77 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
78 78 HTTP error fetching bundle: HTTP Error 404: File not found
79 79 abort: error applying bundle
80 80 (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
81 81 [255]
82 82
83 83 We can override failure to fall back to regular clone
84 84
85 85 $ hg --config ui.clonebundlefallback=true clone -U http://localhost:$HGPORT 404-fallback
86 86 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
87 87 HTTP error fetching bundle: HTTP Error 404: File not found
88 88 falling back to normal clone
89 89 requesting all changes
90 90 adding changesets
91 91 adding manifests
92 92 adding file changes
93 93 added 2 changesets with 2 changes to 2 files
94 94 new changesets 53245c60e682:aaff8d2ffbbf
95 95
96 96 Bundle with partial content works
97 97
98 98 $ hg -R server bundle --type gzip-v1 --base null -r 53245c60e682 partial.hg
99 99 1 changesets found
100 100
101 101 We verify exact bundle content as an extra check against accidental future
102 102 changes. If this output changes, we could break old clients.
103 103
104 104 $ f --size --hexdump partial.hg
105 105 partial.hg: size=207
106 106 0000: 48 47 31 30 47 5a 78 9c 63 60 60 98 17 ac 12 93 |HG10GZx.c``.....|
107 107 0010: f0 ac a9 23 45 70 cb bf 0d 5f 59 4e 4a 7f 79 21 |...#Ep..._YNJ.y!|
108 108 0020: 9b cc 40 24 20 a0 d7 ce 2c d1 38 25 cd 24 25 d5 |..@$ ...,.8%.$%.|
109 109 0030: d8 c2 22 cd 38 d9 24 cd 22 d5 c8 22 cd 24 cd 32 |..".8.$."..".$.2|
110 110 0040: d1 c2 d0 c4 c8 d2 32 d1 38 39 29 c9 34 cd d4 80 |......2.89).4...|
111 111 0050: ab 24 b5 b8 84 cb 40 c1 80 2b 2d 3f 9f 8b 2b 31 |.$....@..+-?..+1|
112 112 0060: 25 45 01 c8 80 9a d2 9b 65 fb e5 9e 45 bf 8d 7f |%E......e...E...|
113 113 0070: 9f c6 97 9f 2b 44 34 67 d9 ec 8e 0f a0 92 0b 75 |....+D4g.......u|
114 114 0080: 41 d6 24 59 18 a4 a4 9a a6 18 1a 5b 98 9b 5a 98 |A.$Y.......[..Z.|
115 115 0090: 9a 18 26 9b a6 19 98 1a 99 99 26 a6 18 9a 98 24 |..&.......&....$|
116 116 00a0: 26 59 a6 25 5a 98 a5 18 a6 24 71 41 35 b1 43 dc |&Y.%Z....$qA5.C.|
117 117 00b0: 16 b2 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 d7 8a |.....E..V....R..|
118 118 00c0: 78 ed fc d5 76 f1 36 35 dc 05 00 36 ed 5e c7 |x...v.65...6.^.|
119 119
120 120 $ echo "http://localhost:$HGPORT1/partial.hg" > server/.hg/clonebundles.manifest
121 121 $ hg clone -U http://localhost:$HGPORT partial-bundle
122 122 applying clone bundle from http://localhost:$HGPORT1/partial.hg
123 123 adding changesets
124 124 adding manifests
125 125 adding file changes
126 126 added 1 changesets with 1 changes to 1 files
127 127 finished applying clone bundle
128 128 searching for changes
129 129 adding changesets
130 130 adding manifests
131 131 adding file changes
132 132 added 1 changesets with 1 changes to 1 files
133 133 new changesets aaff8d2ffbbf
134 134 1 local changesets published
135 135
136 136 Incremental pull doesn't fetch bundle
137 137
138 138 $ hg clone -r 53245c60e682 -U http://localhost:$HGPORT partial-clone
139 139 adding changesets
140 140 adding manifests
141 141 adding file changes
142 142 added 1 changesets with 1 changes to 1 files
143 143 new changesets 53245c60e682
144 144
145 145 $ cd partial-clone
146 146 $ hg pull
147 147 pulling from http://localhost:$HGPORT/
148 148 searching for changes
149 149 adding changesets
150 150 adding manifests
151 151 adding file changes
152 152 added 1 changesets with 1 changes to 1 files
153 153 new changesets aaff8d2ffbbf
154 154 (run 'hg update' to get a working copy)
155 155 $ cd ..
156 156
157 157 Bundle with full content works
158 158
159 159 $ hg -R server bundle --type gzip-v2 --base null -r tip full.hg
160 160 2 changesets found
161 161
162 162 Again, we perform an extra check against bundle content changes. If this content
163 163 changes, clone bundles produced by new Mercurial versions may not be readable
164 164 by old clients.
165 165
166 166 $ f --size --hexdump full.hg
167 167 full.hg: size=442
168 168 0000: 48 47 32 30 00 00 00 0e 43 6f 6d 70 72 65 73 73 |HG20....Compress|
169 169 0010: 69 6f 6e 3d 47 5a 78 9c 63 60 60 d0 e4 76 f6 70 |ion=GZx.c``..v.p|
170 170 0020: f4 73 77 75 0f f2 0f 0d 60 00 02 46 46 76 26 4e |.swu....`..FFv&N|
171 171 0030: c6 b2 d4 a2 e2 cc fc 3c 03 a3 bc a4 e4 8c c4 bc |.......<........|
172 172 0040: f4 d4 62 23 06 06 e6 19 40 f9 4d c1 2a 31 09 cf |..b#....@.M.*1..|
173 173 0050: 9a 3a 52 04 b7 fc db f0 95 e5 a4 f4 97 17 b2 c9 |.:R.............|
174 174 0060: 0c 14 00 02 e6 d9 99 25 1a a7 a4 99 a4 a4 1a 5b |.......%.......[|
175 175 0070: 58 a4 19 27 9b a4 59 a4 1a 59 a4 99 a4 59 26 5a |X..'..Y..Y...Y&Z|
176 176 0080: 18 9a 18 59 5a 26 1a 27 27 25 99 a6 99 1a 70 95 |...YZ&.''%....p.|
177 177 0090: a4 16 97 70 19 28 18 70 a5 e5 e7 73 71 25 a6 a4 |...p.(.p...sq%..|
178 178 00a0: 28 00 19 20 17 af fa df ab ff 7b 3f fb 92 dc 8b |(.. ......{?....|
179 179 00b0: 1f 62 bb 9e b7 d7 d9 87 3d 5a 44 89 2f b0 99 87 |.b......=ZD./...|
180 180 00c0: ec e2 54 63 43 e3 b4 64 43 73 23 33 43 53 0b 63 |..TcC..dCs#3CS.c|
181 181 00d0: d3 14 23 03 a0 fb 2c 2c 0c d3 80 1e 30 49 49 b1 |..#...,,....0II.|
182 182 00e0: 4c 4a 32 48 33 30 b0 34 42 b8 38 29 b1 08 e2 62 |LJ2H30.4B.8)...b|
183 183 00f0: 20 03 6a ca c2 2c db 2f f7 2c fa 6d fc fb 34 be | .j..,./.,.m..4.|
184 184 0100: fc 5c 21 a2 39 cb 66 77 7c 00 0d c3 59 17 14 58 |.\!.9.fw|...Y..X|
185 185 0110: 49 16 06 29 a9 a6 29 86 c6 16 e6 a6 16 a6 26 86 |I..)..).......&.|
186 186 0120: c9 a6 69 06 a6 46 66 a6 89 29 86 26 26 89 49 96 |..i..Ff..).&&.I.|
187 187 0130: 69 89 16 66 29 86 29 49 5c 20 07 3e 16 fe 23 ae |i..f).)I\ .>..#.|
188 188 0140: 26 da 1c ab 10 1f d1 f8 e3 b3 ef cd dd fc 0c 93 |&...............|
189 189 0150: 88 75 34 36 75 04 82 55 17 14 36 a4 38 10 04 d8 |.u46u..U..6.8...|
190 190 0160: 21 01 9a b1 83 f7 e9 45 8b d2 56 c7 a3 1f 82 52 |!......E..V....R|
191 191 0170: d7 8a 78 ed fc d5 76 f1 36 25 81 89 c7 ad ec 90 |..x...v.6%......|
192 192 0180: 54 47 75 2b 89 48 b1 b2 62 c9 89 c9 19 a9 56 45 |TGu+.H..b.....VE|
193 193 0190: a9 65 ba 49 45 89 79 c9 19 ba 60 01 a0 14 23 58 |.e.IE.y...`...#X|
194 194 01a0: 81 35 c8 7d 40 cc 04 e2 a4 a4 a6 25 96 e6 94 60 |.5.}@......%...`|
195 195 01b0: 33 17 5f 54 00 00 d3 1b 0d 4c |3._T.....L|
196 196
197 197 $ echo "http://localhost:$HGPORT1/full.hg" > server/.hg/clonebundles.manifest
198 198 $ hg clone -U http://localhost:$HGPORT full-bundle
199 199 applying clone bundle from http://localhost:$HGPORT1/full.hg
200 200 adding changesets
201 201 adding manifests
202 202 adding file changes
203 203 added 2 changesets with 2 changes to 2 files
204 204 finished applying clone bundle
205 205 searching for changes
206 206 no changes found
207 207 2 local changesets published
208 208
209 209 Feature works over SSH
210 210
211 211 $ hg clone -U -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/server ssh-full-clone
212 212 applying clone bundle from http://localhost:$HGPORT1/full.hg
213 213 adding changesets
214 214 adding manifests
215 215 adding file changes
216 216 added 2 changesets with 2 changes to 2 files
217 217 finished applying clone bundle
218 218 searching for changes
219 219 no changes found
220 220 2 local changesets published
221 221
222 222 Entry with unknown BUNDLESPEC is filtered and not used
223 223
224 224 $ cat > server/.hg/clonebundles.manifest << EOF
225 225 > http://bad.entry1 BUNDLESPEC=UNKNOWN
226 226 > http://bad.entry2 BUNDLESPEC=xz-v1
227 227 > http://bad.entry3 BUNDLESPEC=none-v100
228 228 > http://localhost:$HGPORT1/full.hg BUNDLESPEC=gzip-v2
229 229 > EOF
230 230
231 231 $ hg clone -U http://localhost:$HGPORT filter-unknown-type
232 232 applying clone bundle from http://localhost:$HGPORT1/full.hg
233 233 adding changesets
234 234 adding manifests
235 235 adding file changes
236 236 added 2 changesets with 2 changes to 2 files
237 237 finished applying clone bundle
238 238 searching for changes
239 239 no changes found
240 240 2 local changesets published
241 241
242 242 Automatic fallback when all entries are filtered
243 243
244 244 $ cat > server/.hg/clonebundles.manifest << EOF
245 245 > http://bad.entry BUNDLESPEC=UNKNOWN
246 246 > EOF
247 247
248 248 $ hg clone -U http://localhost:$HGPORT filter-all
249 249 no compatible clone bundles available on server; falling back to regular clone
250 250 (you may want to report this to the server operator)
251 251 requesting all changes
252 252 adding changesets
253 253 adding manifests
254 254 adding file changes
255 255 added 2 changesets with 2 changes to 2 files
256 256 new changesets 53245c60e682:aaff8d2ffbbf
257 257
258 258 URLs requiring SNI are filtered in Python <2.7.9
259 259
260 260 $ cp full.hg sni.hg
261 261 $ cat > server/.hg/clonebundles.manifest << EOF
262 262 > http://localhost:$HGPORT1/sni.hg REQUIRESNI=true
263 263 > http://localhost:$HGPORT1/full.hg
264 264 > EOF
265 265
266 266 #if sslcontext
267 267 Python 2.7.9+ support SNI
268 268
269 269 $ hg clone -U http://localhost:$HGPORT sni-supported
270 270 applying clone bundle from http://localhost:$HGPORT1/sni.hg
271 271 adding changesets
272 272 adding manifests
273 273 adding file changes
274 274 added 2 changesets with 2 changes to 2 files
275 275 finished applying clone bundle
276 276 searching for changes
277 277 no changes found
278 278 2 local changesets published
279 279 #else
280 280 Python <2.7.9 will filter SNI URLs
281 281
282 282 $ hg clone -U http://localhost:$HGPORT sni-unsupported
283 283 applying clone bundle from http://localhost:$HGPORT1/full.hg
284 284 adding changesets
285 285 adding manifests
286 286 adding file changes
287 287 added 2 changesets with 2 changes to 2 files
288 288 finished applying clone bundle
289 289 searching for changes
290 290 no changes found
291 291 2 local changesets published
292 292 #endif
293 293
294 294 Stream clone bundles are supported
295 295
296 296 $ hg -R server debugcreatestreamclonebundle packed.hg
297 297 writing 613 bytes for 4 files
298 298 bundle requirements: generaldelta, revlogv1, sparserevlog
299 299
300 300 No bundle spec should work
301 301
302 302 $ cat > server/.hg/clonebundles.manifest << EOF
303 303 > http://localhost:$HGPORT1/packed.hg
304 304 > EOF
305 305
306 306 $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
307 307 applying clone bundle from http://localhost:$HGPORT1/packed.hg
308 308 4 files to transfer, 613 bytes of data
309 309 transferred 613 bytes in *.* seconds (*) (glob)
310 310 finished applying clone bundle
311 311 searching for changes
312 312 no changes found
313 313
314 314 Bundle spec without parameters should work
315 315
316 316 $ cat > server/.hg/clonebundles.manifest << EOF
317 317 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
318 318 > EOF
319 319
320 320 $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
321 321 applying clone bundle from http://localhost:$HGPORT1/packed.hg
322 322 4 files to transfer, 613 bytes of data
323 323 transferred 613 bytes in *.* seconds (*) (glob)
324 324 finished applying clone bundle
325 325 searching for changes
326 326 no changes found
327 327
328 328 Bundle spec with format requirements should work
329 329
330 330 $ cat > server/.hg/clonebundles.manifest << EOF
331 331 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
332 332 > EOF
333 333
334 334 $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
335 335 applying clone bundle from http://localhost:$HGPORT1/packed.hg
336 336 4 files to transfer, 613 bytes of data
337 337 transferred 613 bytes in *.* seconds (*) (glob)
338 338 finished applying clone bundle
339 339 searching for changes
340 340 no changes found
341 341
342 342 Stream bundle spec with unknown requirements should be filtered out
343 343
344 344 $ cat > server/.hg/clonebundles.manifest << EOF
345 345 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
346 346 > EOF
347 347
348 348 $ hg clone -U http://localhost:$HGPORT stream-clone-unsupported-requirements
349 349 no compatible clone bundles available on server; falling back to regular clone
350 350 (you may want to report this to the server operator)
351 351 requesting all changes
352 352 adding changesets
353 353 adding manifests
354 354 adding file changes
355 355 added 2 changesets with 2 changes to 2 files
356 356 new changesets 53245c60e682:aaff8d2ffbbf
357 357
358 358 Set up manifest for testing preferences
359 359 (Remember, the TYPE does not have to match reality - the URL is
360 360 important)
361 361
362 362 $ cp full.hg gz-a.hg
363 363 $ cp full.hg gz-b.hg
364 364 $ cp full.hg bz2-a.hg
365 365 $ cp full.hg bz2-b.hg
366 366 $ cat > server/.hg/clonebundles.manifest << EOF
367 367 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 extra=a
368 368 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2 extra=a
369 369 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
370 370 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
371 371 > EOF
372 372
373 373 Preferring an undefined attribute will take first entry
374 374
375 375 $ hg --config ui.clonebundleprefers=foo=bar clone -U http://localhost:$HGPORT prefer-foo
376 376 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
377 377 adding changesets
378 378 adding manifests
379 379 adding file changes
380 380 added 2 changesets with 2 changes to 2 files
381 381 finished applying clone bundle
382 382 searching for changes
383 383 no changes found
384 384 2 local changesets published
385 385
386 386 Preferring bz2 type will download first entry of that type
387 387
388 388 $ hg --config ui.clonebundleprefers=COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-bz
389 389 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
390 390 adding changesets
391 391 adding manifests
392 392 adding file changes
393 393 added 2 changesets with 2 changes to 2 files
394 394 finished applying clone bundle
395 395 searching for changes
396 396 no changes found
397 397 2 local changesets published
398 398
399 399 Preferring multiple values of an option works
400 400
401 401 $ hg --config ui.clonebundleprefers=COMPRESSION=unknown,COMPRESSION=bzip2 clone -U http://localhost:$HGPORT prefer-multiple-bz
402 402 applying clone bundle from http://localhost:$HGPORT1/bz2-a.hg
403 403 adding changesets
404 404 adding manifests
405 405 adding file changes
406 406 added 2 changesets with 2 changes to 2 files
407 407 finished applying clone bundle
408 408 searching for changes
409 409 no changes found
410 410 2 local changesets published
411 411
412 412 Sorting multiple values should get us back to original first entry
413 413
414 414 $ hg --config ui.clonebundleprefers=BUNDLESPEC=unknown,BUNDLESPEC=gzip-v2,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-multiple-gz
415 415 applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
416 416 adding changesets
417 417 adding manifests
418 418 adding file changes
419 419 added 2 changesets with 2 changes to 2 files
420 420 finished applying clone bundle
421 421 searching for changes
422 422 no changes found
423 423 2 local changesets published
424 424
425 425 Preferring multiple attributes has correct order
426 426
427 427 $ hg --config ui.clonebundleprefers=extra=b,BUNDLESPEC=bzip2-v2 clone -U http://localhost:$HGPORT prefer-separate-attributes
428 428 applying clone bundle from http://localhost:$HGPORT1/bz2-b.hg
429 429 adding changesets
430 430 adding manifests
431 431 adding file changes
432 432 added 2 changesets with 2 changes to 2 files
433 433 finished applying clone bundle
434 434 searching for changes
435 435 no changes found
436 436 2 local changesets published
437 437
438 438 Test where attribute is missing from some entries
439 439
440 440 $ cat > server/.hg/clonebundles.manifest << EOF
441 441 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
442 442 > http://localhost:$HGPORT1/bz2-a.hg BUNDLESPEC=bzip2-v2
443 443 > http://localhost:$HGPORT1/gz-b.hg BUNDLESPEC=gzip-v2 extra=b
444 444 > http://localhost:$HGPORT1/bz2-b.hg BUNDLESPEC=bzip2-v2 extra=b
445 445 > EOF
446 446
447 447 $ hg --config ui.clonebundleprefers=extra=b clone -U http://localhost:$HGPORT prefer-partially-defined-attribute
448 448 applying clone bundle from http://localhost:$HGPORT1/gz-b.hg
449 449 adding changesets
450 450 adding manifests
451 451 adding file changes
452 452 added 2 changesets with 2 changes to 2 files
453 453 finished applying clone bundle
454 454 searching for changes
455 455 no changes found
456 456 2 local changesets published
457 457
458 458 Test interaction between clone bundles and --stream
459 459
460 460 A manifest with just a gzip bundle
461 461
462 462 $ cat > server/.hg/clonebundles.manifest << EOF
463 463 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
464 464 > EOF
465 465
466 466 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip
467 467 no compatible clone bundles available on server; falling back to regular clone
468 468 (you may want to report this to the server operator)
469 469 streaming all changes
470 470 9 files to transfer, 816 bytes of data
471 471 transferred 816 bytes in * seconds (*) (glob)
472 472
473 473 A manifest with a stream clone but no BUNDLESPEC
474 474
475 475 $ cat > server/.hg/clonebundles.manifest << EOF
476 476 > http://localhost:$HGPORT1/packed.hg
477 477 > EOF
478 478
479 479 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-no-bundlespec
480 480 no compatible clone bundles available on server; falling back to regular clone
481 481 (you may want to report this to the server operator)
482 482 streaming all changes
483 483 9 files to transfer, 816 bytes of data
484 484 transferred 816 bytes in * seconds (*) (glob)
485 485
486 486 A manifest with a gzip bundle and a stream clone
487 487
488 488 $ cat > server/.hg/clonebundles.manifest << EOF
489 489 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
490 490 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1
491 491 > EOF
492 492
493 493 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
494 494 applying clone bundle from http://localhost:$HGPORT1/packed.hg
495 495 4 files to transfer, 613 bytes of data
496 496 transferred 613 bytes in * seconds (*) (glob)
497 497 finished applying clone bundle
498 498 searching for changes
499 499 no changes found
500 500
501 501 A manifest with a gzip bundle and stream clone with supported requirements
502 502
503 503 $ cat > server/.hg/clonebundles.manifest << EOF
504 504 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
505 505 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv1
506 506 > EOF
507 507
508 508 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
509 509 applying clone bundle from http://localhost:$HGPORT1/packed.hg
510 510 4 files to transfer, 613 bytes of data
511 511 transferred 613 bytes in * seconds (*) (glob)
512 512 finished applying clone bundle
513 513 searching for changes
514 514 no changes found
515 515
516 516 A manifest with a gzip bundle and a stream clone with unsupported requirements
517 517
518 518 $ cat > server/.hg/clonebundles.manifest << EOF
519 519 > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2
520 520 > http://localhost:$HGPORT1/packed.hg BUNDLESPEC=none-packed1;requirements%3Drevlogv42
521 521 > EOF
522 522
523 523 $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-unsupported-requirements
524 524 no compatible clone bundles available on server; falling back to regular clone
525 525 (you may want to report this to the server operator)
526 526 streaming all changes
527 527 9 files to transfer, 816 bytes of data
528 528 transferred 816 bytes in * seconds (*) (glob)
529 529
530 530 Test clone bundle retrieved through bundle2
531 531
532 532 $ cat << EOF >> $HGRCPATH
533 533 > [extensions]
534 534 > largefiles=
535 535 > EOF
536 536 $ killdaemons.py
537 537 $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
538 538 $ cat hg.pid >> $DAEMON_PIDS
539 539
540 540 $ hg -R server debuglfput gz-a.hg
541 541 1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
542 542
543 543 $ cat > server/.hg/clonebundles.manifest << EOF
544 544 > largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae BUNDLESPEC=gzip-v2
545 545 > EOF
546 546
547 547 $ hg clone -U http://localhost:$HGPORT largefile-provided --traceback
548 548 applying clone bundle from largefile://1f74b3d08286b9b3a16fb3fa185dd29219cbc6ae
549 549 adding changesets
550 550 adding manifests
551 551 adding file changes
552 552 added 2 changesets with 2 changes to 2 files
553 553 finished applying clone bundle
554 554 searching for changes
555 555 no changes found
556 556 2 local changesets published
@@ -1,122 +1,122 b''
1 1 #require serve
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ echo a > a
6 6 $ hg ci -Ama -d '1123456789 0'
7 7 adding a
8 8 $ hg serve --config server.uncompressed=True -p $HGPORT -d --pid-file=hg.pid
9 9 $ cat hg.pid >> $DAEMON_PIDS
10 10 $ cd ..
11 11 $ tinyproxy.py $HGPORT1 localhost 2>proxy.log >/dev/null </dev/null &
12 12 $ while [ ! -f proxy.pid ]; do sleep 0; done
13 13 $ cat proxy.pid >> $DAEMON_PIDS
14 14
15 15 url for proxy, stream
16 16
17 17 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --stream http://localhost:$HGPORT/ b
18 18 streaming all changes
19 19 6 files to transfer, 412 bytes of data (reporevlogstore !)
20 20 4 files to transfer, 330 bytes of data (reposimplestore !)
21 21 transferred * bytes in * seconds (*/sec) (glob)
22 22 updating to branch default
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ cd b
25 25 $ hg verify
26 26 checking changesets
27 27 checking manifests
28 28 crosschecking files in changesets and manifests
29 29 checking files
30 30 checked 1 changesets with 1 changes to 1 files
31 31 $ cd ..
32 32
33 33 url for proxy, pull
34 34
35 35 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT/ b-pull
36 36 requesting all changes
37 37 adding changesets
38 38 adding manifests
39 39 adding file changes
40 40 added 1 changesets with 1 changes to 1 files
41 41 new changesets 83180e7845de
42 42 updating to branch default
43 43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 44 $ cd b-pull
45 45 $ hg verify
46 46 checking changesets
47 47 checking manifests
48 48 crosschecking files in changesets and manifests
49 49 checking files
50 50 checked 1 changesets with 1 changes to 1 files
51 51 $ cd ..
52 52
53 53 host:port for proxy
54 54
55 55 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ c
56 56 requesting all changes
57 57 adding changesets
58 58 adding manifests
59 59 adding file changes
60 60 added 1 changesets with 1 changes to 1 files
61 61 new changesets 83180e7845de
62 62 updating to branch default
63 63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 64
65 65 proxy url with user name and password
66 66
67 67 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ d
68 68 requesting all changes
69 69 adding changesets
70 70 adding manifests
71 71 adding file changes
72 72 added 1 changesets with 1 changes to 1 files
73 73 new changesets 83180e7845de
74 74 updating to branch default
75 75 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 76
77 77 url with user name and password
78 78
79 79 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://user:passwd@localhost:$HGPORT/ e
80 80 requesting all changes
81 81 adding changesets
82 82 adding manifests
83 83 adding file changes
84 84 added 1 changesets with 1 changes to 1 files
85 85 new changesets 83180e7845de
86 86 updating to branch default
87 87 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 88
89 89 bad host:port for proxy ("Protocol not supported" can happen on
90 90 misconfigured hosts)
91 91
92 92 $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
93 abort: error: (Connection refused|Protocol not supported|.* actively refused it|\$EADDRNOTAVAIL\$) (re)
93 abort: error: (Connection refused|Protocol not supported|.* actively refused it|\$EADDRNOTAVAIL\$|No route to host) (re)
94 94 [255]
95 95
96 96 do not use the proxy if it is in the no list
97 97
98 98 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.no=localhost http://localhost:$HGPORT/ g
99 99 requesting all changes
100 100 adding changesets
101 101 adding manifests
102 102 adding file changes
103 103 added 1 changesets with 1 changes to 1 files
104 104 new changesets 83180e7845de
105 105 updating to branch default
106 106 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 107 $ cat proxy.log
108 108 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
109 109 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
110 110 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
111 111 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
112 112 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
113 113 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
114 114 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
115 115 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
116 116 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
117 117 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
118 118 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
119 119 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
120 120 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
121 121 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
122 122 $LOCALIP - - [$LOGDATE$] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
@@ -1,505 +1,506 b''
1 1 #require serve no-reposimplestore no-chg
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > lfs=
6 6 > [lfs]
7 7 > track=all()
8 8 > [web]
9 9 > push_ssl = False
10 10 > allow-push = *
11 11 > EOF
12 12
13 13 Serving LFS files can experimentally be turned off. The long term solution is
14 14 to support the 'verify' action in both client and server, so that the server can
15 15 tell the client to store files elsewhere.
16 16
17 17 $ hg init server
18 18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
19 19 > --config experimental.lfs.serve=False -R server serve -d \
20 20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
21 21 $ cat hg.pid >> $DAEMON_PIDS
22 22
23 23 Uploads fail...
24 24
25 25 $ hg init client
26 26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
27 27 $ hg -R client ci -Am 'initial commit'
28 28 adding lfs.bin
29 29 $ hg -R client push http://localhost:$HGPORT
30 30 pushing to http://localhost:$HGPORT/
31 31 searching for changes
32 32 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
33 33 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "upload" is supported)
34 34 [255]
35 35
36 36 ... so do a local push to make the data available. Remove the blob from the
37 37 default cache, so it attempts to download.
38 38 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
39 39 > --config "lfs.url=null://" \
40 40 > -R client push -q server
41 41 $ mv `hg config lfs.usercache` $TESTTMP/servercache
42 42
43 43 Downloads fail...
44 44
45 45 $ hg clone http://localhost:$HGPORT httpclone
46 46 (remote is using large file support (lfs); lfs will be enabled for this repository)
47 47 requesting all changes
48 48 adding changesets
49 49 adding manifests
50 50 adding file changes
51 51 added 1 changesets with 1 changes to 1 files
52 52 new changesets 525251863cad
53 53 updating to branch default
54 54 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
55 55 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "download" is supported)
56 56 [255]
57 57
58 58 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
59 59
60 60 $ cat $TESTTMP/access.log $TESTTMP/errors.log
61 61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
62 62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
63 63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 65 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
66 66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
67 67 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
68 68 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
69 69 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
70 70
71 71 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
72 72 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
73 73 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
74 74 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
75 75 $ cat hg.pid >> $DAEMON_PIDS
76 76
77 77 Reasonable hint for a misconfigured blob server
78 78
79 79 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT/missing
80 80 abort: LFS HTTP error: HTTP Error 404: Not Found!
81 81 (the "lfs.url" config may be used to override http://localhost:$HGPORT/missing)
82 82 [255]
83 83
84 84 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT2/missing
85 85 abort: LFS error: *onnection *refused*! (glob) (?)
86 86 abort: LFS error: $EADDRNOTAVAIL$! (glob) (?)
87 abort: LFS error: No route to host! (?)
87 88 (the "lfs.url" config may be used to override http://localhost:$HGPORT2/missing)
88 89 [255]
89 90
90 91 Blob URIs are correct when --prefix is used
91 92
92 93 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
93 94 using http://localhost:$HGPORT/subdir/mount/point
94 95 sending capabilities command
95 96 (remote is using large file support (lfs); lfs will be enabled for this repository)
96 97 query 1; heads
97 98 sending batch command
98 99 requesting all changes
99 100 sending getbundle command
100 101 bundle2-input-bundle: with-transaction
101 102 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
102 103 adding changesets
103 104 add changeset 525251863cad
104 105 adding manifests
105 106 adding file changes
106 107 adding lfs.bin revisions
107 108 added 1 changesets with 1 changes to 1 files
108 109 bundle2-input-part: total payload size 648
109 110 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
110 111 bundle2-input-part: "phase-heads" supported
111 112 bundle2-input-part: total payload size 24
112 113 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
113 114 bundle2-input-part: total payload size 39
114 115 bundle2-input-bundle: 3 parts total
115 116 checking for updated bookmarks
116 117 updating the branch cache
117 118 new changesets 525251863cad
118 119 updating to branch default
119 120 resolving manifests
120 121 branchmerge: False, force: False, partial: False
121 122 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
122 123 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
123 124 Status: 200
124 125 Content-Length: 371
125 126 Content-Type: application/vnd.git-lfs+json
126 127 Date: $HTTP_DATE$
127 128 Server: testing stub value
128 129 {
129 130 "objects": [
130 131 {
131 132 "actions": {
132 133 "download": {
133 134 "expires_at": "$ISO_8601_DATE_TIME$"
134 135 "header": {
135 136 "Accept": "application/vnd.git-lfs"
136 137 }
137 138 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
138 139 }
139 140 }
140 141 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
141 142 "size": 20
142 143 }
143 144 ]
144 145 "transfer": "basic"
145 146 }
146 147 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
147 148 Status: 200
148 149 Content-Length: 20
149 150 Content-Type: application/octet-stream
150 151 Date: $HTTP_DATE$
151 152 Server: testing stub value
152 153 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
153 154 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
154 155 lfs: downloaded 1 files (20 bytes)
155 156 lfs.bin: remote created -> g
156 157 getting lfs.bin
157 158 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
158 159 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 160 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
160 161
161 162 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
162 163
163 164 $ cat $TESTTMP/access.log $TESTTMP/errors.log
164 165 $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
165 166 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
166 167 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
167 168 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
168 169 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
169 170 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
170 171
171 172 Blobs that already exist in the usercache are linked into the repo store, even
172 173 though the client doesn't send the blob.
173 174
174 175 $ hg init server2
175 176 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
176 177 > -p $HGPORT --pid-file=hg.pid \
177 178 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
178 179 $ cat hg.pid >> $DAEMON_PIDS
179 180
180 181 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
181 182 > push http://localhost:$HGPORT | grep '^[{} ]'
182 183 {
183 184 "objects": [
184 185 {
185 186 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
186 187 "size": 20
187 188 }
188 189 ]
189 190 "transfer": "basic"
190 191 }
191 192 $ find server2/.hg/store/lfs/objects | sort
192 193 server2/.hg/store/lfs/objects
193 194 server2/.hg/store/lfs/objects/f0
194 195 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
195 196 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
196 197 $ cat $TESTTMP/errors.log
197 198
198 199 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
199 200 > import errno
200 201 > from hgext.lfs import blobstore
201 202 >
202 203 > _numverifies = 0
203 204 > _readerr = True
204 205 >
205 206 > def reposetup(ui, repo):
206 207 > # Nothing to do with a remote repo
207 208 > if not repo.local():
208 209 > return
209 210 >
210 211 > store = repo.svfs.lfslocalblobstore
211 212 > class badstore(store.__class__):
212 213 > def download(self, oid, src):
213 214 > '''Called in the server to handle reading from the client in a
214 215 > PUT request.'''
215 216 > origread = src.read
216 217 > def _badread(nbytes):
217 218 > # Simulate bad data/checksum failure from the client
218 219 > return b'0' * len(origread(nbytes))
219 220 > src.read = _badread
220 221 > super(badstore, self).download(oid, src)
221 222 >
222 223 > def _read(self, vfs, oid, verify):
223 224 > '''Called in the server to read data for a GET request, and then
224 225 > calls self._verify() on it before returning.'''
225 226 > global _readerr
226 227 > # One time simulation of a read error
227 228 > if _readerr:
228 229 > _readerr = False
229 230 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
230 231 > # Simulate corrupt content on client download
231 232 > blobstore._verify(oid, b'dummy content')
232 233 >
233 234 > def verify(self, oid):
234 235 > '''Called in the server to populate the Batch API response,
235 236 > letting the client re-upload if the file is corrupt.'''
236 237 > # Fail verify in Batch API for one clone command and one push
237 238 > # command with an IOError. Then let it through to access other
238 239 > # functions. Checksum failure is tested elsewhere.
239 240 > global _numverifies
240 241 > _numverifies += 1
241 242 > if _numverifies <= 2:
242 243 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
243 244 > return super(badstore, self).verify(oid)
244 245 >
245 246 > store.__class__ = badstore
246 247 > EOF
247 248
248 249 $ rm -rf `hg config lfs.usercache`
249 250 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
250 251 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
251 252 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
252 253 > -R server serve -d \
253 254 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
254 255 $ cat hg.pid >> $DAEMON_PIDS
255 256
256 257 Test an I/O error in localstore.verify() (Batch API) with GET
257 258
258 259 $ hg clone http://localhost:$HGPORT1 httpclone2
259 260 (remote is using large file support (lfs); lfs will be enabled for this repository)
260 261 requesting all changes
261 262 adding changesets
262 263 adding manifests
263 264 adding file changes
264 265 added 1 changesets with 1 changes to 1 files
265 266 new changesets 525251863cad
266 267 updating to branch default
267 268 abort: LFS server error for "lfs.bin": Internal server error!
268 269 [255]
269 270
270 271 Test an I/O error in localstore.verify() (Batch API) with PUT
271 272
272 273 $ echo foo > client/lfs.bin
273 274 $ hg -R client ci -m 'mod lfs'
274 275 $ hg -R client push http://localhost:$HGPORT1
275 276 pushing to http://localhost:$HGPORT1/
276 277 searching for changes
277 278 abort: LFS server error for "unknown": Internal server error!
278 279 [255]
279 280 TODO: figure out how to associate the file name in the error above
280 281
281 282 Test a bad checksum sent by the client in the transfer API
282 283
283 284 $ hg -R client push http://localhost:$HGPORT1
284 285 pushing to http://localhost:$HGPORT1/
285 286 searching for changes
286 287 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
287 288 [255]
288 289
289 290 $ echo 'test lfs file' > server/lfs3.bin
290 291 $ hg --config experimental.lfs.disableusercache=True \
291 292 > -R server ci -Aqm 'another lfs file'
292 293 $ hg -R client pull -q http://localhost:$HGPORT1
293 294
294 295 Test an I/O error during the processing of the GET request
295 296
296 297 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
297 298 > -R client update -r tip
298 299 abort: LFS HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
299 300 [255]
300 301
301 302 Test a checksum failure during the processing of the GET request
302 303
303 304 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
304 305 > -R client update -r tip
305 306 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
306 307 [255]
307 308
308 309 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
309 310
310 311 $ cat $TESTTMP/access.log
311 312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
312 313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
313 314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 315 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
315 316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
316 317 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
317 318 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
318 319 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
319 320 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
320 321 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
321 322 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
322 323 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
323 324 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
324 325 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
325 326 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
326 327 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
327 328 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
328 329 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
329 330 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
330 331 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
331 332 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
332 333 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
333 334 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
334 335 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
335 336 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
336 337 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
337 338
338 339 $ grep -v ' File "' $TESTTMP/errors.log
339 340 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
340 341 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
341 342 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
342 343 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
343 344 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno 5] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
344 345 $LOCALIP - - [$ERRDATE$] HG error: (glob)
345 346 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
346 347 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
347 348 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
348 349 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
349 350 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno 5] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
350 351 $LOCALIP - - [$ERRDATE$] HG error: (glob)
351 352 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
352 353 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
353 354 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
354 355 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
355 356 $LOCALIP - - [$ERRDATE$] HG error: % oid) (glob)
356 357 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
357 358 $LOCALIP - - [$ERRDATE$] HG error: (glob)
358 359 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
359 360 Traceback (most recent call last):
360 361 self.do_write()
361 362 self.do_hgweb()
362 363 for chunk in self.server.application(env, self._start_response):
363 364 for r in self._runwsgi(req, res, repo):
364 365 rctx, req, res, self.check_perm)
365 366 return func(*(args + a), **kw) (no-py3 !)
366 367 lambda perm:
367 368 res.setbodybytes(localstore.read(oid))
368 369 blob = self._read(self.vfs, oid, verify)
369 370 raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
370 371 *Error: [Errno 5] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error (glob)
371 372
372 373 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
373 374 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
374 375 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
375 376 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
376 377 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, b'dummy content') (glob)
377 378 $LOCALIP - - [$ERRDATE$] HG error: hint=_(b'run hg verify')) (glob)
378 379 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
379 380 $LOCALIP - - [$ERRDATE$] HG error: (glob)
380 381
381 382 Basic Authorization headers are returned by the Batch API, and sent back with
382 383 the GET/PUT request.
383 384
384 385 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
385 386
386 387 $ cat >> $HGRCPATH << EOF
387 388 > [experimental]
388 389 > lfs.disableusercache = True
389 390 > [auth]
390 391 > l.schemes=http
391 392 > l.prefix=lo
392 393 > l.username=user
393 394 > l.password=pass
394 395 > EOF
395 396
396 397 $ hg --config extensions.x=$TESTDIR/httpserverauth.py \
397 398 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
398 399 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
399 400 $ mv hg.pid $DAEMON_PIDS
400 401
401 402 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
402 403 {
403 404 "objects": [
404 405 {
405 406 "actions": {
406 407 "download": {
407 408 "expires_at": "$ISO_8601_DATE_TIME$"
408 409 "header": {
409 410 "Accept": "application/vnd.git-lfs"
410 411 "Authorization": "Basic dXNlcjpwYXNz"
411 412 }
412 413 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
413 414 }
414 415 }
415 416 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
416 417 "size": 14
417 418 }
418 419 ]
419 420 "transfer": "basic"
420 421 }
421 422
422 423 $ echo 'another blob' > auth_clone/lfs.blob
423 424 $ hg -R auth_clone ci -Aqm 'add blob'
424 425
425 426 $ cat > use_digests.py << EOF
426 427 > from mercurial import (
427 428 > exthelper,
428 429 > url,
429 430 > )
430 431 >
431 432 > eh = exthelper.exthelper()
432 433 > uisetup = eh.finaluisetup
433 434 >
434 435 > @eh.wrapfunction(url, 'opener')
435 436 > def urlopener(orig, *args, **kwargs):
436 437 > opener = orig(*args, **kwargs)
437 438 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
438 439 > return opener
439 440 > EOF
440 441
441 442 Test that Digest Auth fails gracefully before testing the successful Basic Auth
442 443
443 444 $ hg -R auth_clone push --config extensions.x=use_digests.py
444 445 pushing to http://localhost:$HGPORT1/
445 446 searching for changes
446 447 abort: LFS HTTP error: HTTP Error 401: the server must support Basic Authentication!
447 448 (api=http://localhost:$HGPORT1/.git/info/lfs/objects/batch, action=upload)
448 449 [255]
449 450
450 451 $ hg -R auth_clone --debug push | egrep '^[{}]| '
451 452 {
452 453 "objects": [
453 454 {
454 455 "actions": {
455 456 "upload": {
456 457 "expires_at": "$ISO_8601_DATE_TIME$"
457 458 "header": {
458 459 "Accept": "application/vnd.git-lfs"
459 460 "Authorization": "Basic dXNlcjpwYXNz"
460 461 }
461 462 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
462 463 }
463 464 }
464 465 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
465 466 "size": 13
466 467 }
467 468 ]
468 469 "transfer": "basic"
469 470 }
470 471
471 472 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
472 473
473 474 $ cat $TESTTMP/access.log $TESTTMP/errors.log
474 475 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
475 476 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
476 477 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
477 478 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
478 479 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
479 480 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
480 481 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
481 482 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
482 483 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest (glob)
483 484 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 401 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
484 485 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
485 486 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
486 487 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
487 488 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
488 489 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
489 490 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
490 491 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
491 492 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
492 493 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
493 494 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
494 495 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
495 496 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
496 497 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
497 498 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
498 499 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
499 500 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
500 501 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
501 502 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
502 503 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
503 504 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
504 505 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
505 506 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
General Comments 0
You need to be logged in to leave comments. Login now