##// END OF EJS Templates
lfs: check content length after downloading content...
Matt Harbison -
r44544:0ee0a3f6 default
parent child Browse files
Show More
@@ -1,748 +1,763 b''
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import hashlib
13 13 import json
14 14 import os
15 15 import re
16 16 import socket
17 17
18 18 from mercurial.i18n import _
19 19 from mercurial.pycompat import getattr
20 20
21 21 from mercurial import (
22 22 encoding,
23 23 error,
24 24 node,
25 25 pathutil,
26 26 pycompat,
27 27 url as urlmod,
28 28 util,
29 29 vfs as vfsmod,
30 30 worker,
31 31 )
32 32
33 33 from mercurial.utils import stringutil
34 34
35 35 from ..largefiles import lfutil
36 36
37 37 # 64 bytes for SHA256
38 38 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
39 39
40 40
class lfsvfs(vfsmod.vfs):
    """vfs storing each blob under a two-level path derived from its oid.

    A 64-hex-digit sha256 oid ``XXYYYY...`` is stored at ``XX/YYYY...``,
    keeping directory fanout manageable.
    """

    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, [], oids) tuple for blobs under path

        Oids only exist in the root of this vfs, so dirpath is always ''.
        """
        root = os.path.normpath(self.base)
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        oids = []

        for dirpath, dirs, files in os.walk(
            self.reljoin(self.base, path or b''), onerror=onerror
        ):
            # Reduce to the path relative to this vfs root.
            dirpath = dirpath[prefixlen:]

            # Silently skip unexpected files and directories; only two-char
            # fanout directories can hold blobs (see join() above).
            if len(dirpath) == 2:
                oids.extend(
                    [dirpath + f for f in files if _lfsre.match(dirpath + f)]
                )

        yield (b'', [], oids)
71 71
72 72
class nullvfs(lfsvfs):
    """A black-hole store used when the user-level blob cache is disabled.

    Reads behave like a vfs with no content; writes are discarded.
    """

    def __init__(self):
        # Deliberately skip lfsvfs/vfs.__init__: there is no backing path.
        pass

    def exists(self, oid):
        return False

    def read(self, oid):
        # store.read() calls into here if the blob doesn't exist in its
        # self.vfs. Raise the same error as a normal vfs when asked to read a
        # file that doesn't exist. The only difference is the full file path
        # isn't available in the error.
        raise IOError(
            errno.ENOENT,
            pycompat.sysstr(b'%s: No such file or directory' % oid),
        )

    def walk(self, path=None, onerror=None):
        # Mirror lfsvfs.walk()'s single-tuple shape, with no oids.
        return (b'', [], [])

    def write(self, oid, data):
        # Writes are intentionally dropped.
        pass
95 95
96 96
class filewithprogress(object):
    """File-like wrapper that supports ``__len__`` and ``read``.

    Useful to provide progress information for how many bytes are read:
    ``callback`` (if not None) is invoked with the byte count of every
    non-empty ``read``. The underlying file is closed as soon as it is
    exhausted.
    """

    def __init__(self, fp, callback):
        self._fp = fp
        self._callback = callback  # func(readsize)
        # Measure the total length up front by seeking to the end.
        fp.seek(0, os.SEEK_END)
        self._len = fp.tell()
        fp.seek(0)

    def __len__(self):
        return self._len

    def read(self, size):
        fp = self._fp
        if fp is None:
            # Already exhausted and closed on a previous call.
            return b''
        data = fp.read(size)
        if not data:
            # EOF: release the underlying file immediately.
            fp.close()
            self._fp = None
        elif self._callback:
            self._callback(len(data))
        return data
124 124
125 125
class local(object):
    """Local blobstore for large file contents.

    This blobstore is used both as a cache and as a staging area for large blobs
    to be uploaded to the remote blobstore.
    """

    def __init__(self, repo):
        fullpath = repo.svfs.join(b'lfs/objects')
        self.vfs = lfsvfs(fullpath)

        if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
            self.cachevfs = nullvfs()
        else:
            usercache = lfutil._usercachedir(repo.ui, b'lfs')
            self.cachevfs = lfsvfs(usercache)
        self.ui = repo.ui

    def open(self, oid):
        """Open a read-only file descriptor to the named blob, in either the
        usercache or the local store."""
        # The usercache is the most likely place to hold the file. Commit will
        # write to both it and the local store, as will anything that downloads
        # the blobs. However, things like clone without an update won't
        # populate the local store. For an init + push of a local clone,
        # the usercache is the only place it _could_ be. If not present, the
        # missing file msg here will indicate the local repo, not the usercache.
        if self.cachevfs.exists(oid):
            return self.cachevfs(oid, b'rb')

        return self.vfs(oid, b'rb')

    def download(self, oid, src, content_length):
        """Read the blob from the remote source in chunks, verify the content,
        and write to this local blobstore.

        ``content_length`` is the value of the Content-Length header (or None
        if the server didn't send one); a mismatch with the received byte
        count raises LfsRemoteError before the corrupt blob is linked anywhere.
        """
        sha256 = hashlib.sha256()
        size = 0

        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            for chunk in util.filechunkiter(src, size=1048576):
                fp.write(chunk)
                sha256.update(chunk)
                size += len(chunk)

            # If the server advertised a length longer than what we actually
            # received, then we should expect that the server crashed while
            # producing the response (but the server has no way of telling us
            # that), and we really don't need to try to write the response to
            # the localstore, because it's not going to match the expected.
            if content_length is not None and int(content_length) != size:
                # Both values are ints: use %d for each. (b"%s" % int raises
                # TypeError on Python 3 — PEP 461 requires bytes-like for %s.)
                msg = (
                    b"Response length (%d) does not match Content-Length "
                    b"header (%d): likely server-side crash"
                )
                raise LfsRemoteError(_(msg) % (size, int(content_length)))

        realoid = node.hex(sha256.digest())
        if realoid != oid:
            raise LfsCorruptionError(
                _(b'corrupt remote lfs object: %s') % oid
            )

        self._linktousercache(oid)

    def write(self, oid, data):
        """Write blob to local blobstore.

        This should only be called from the filelog during a commit or similar.
        As such, there is no need to verify the data. Imports from a remote
        store must use ``download()`` instead."""
        with self.vfs(oid, b'wb', atomictemp=True) as fp:
            fp.write(data)

        self._linktousercache(oid)

    def linkfromusercache(self, oid):
        """Link blobs found in the user cache into this store.

        The server module needs to do this when it lets the client know not to
        upload the blob, to ensure it is always available in this store.
        Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid) and not isinstance(
            self.cachevfs, nullvfs
        ):
            self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
                self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        # Stream in chunks so huge blobs don't need to fit in memory.
        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == node.hex(sha256.digest())

    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)
249 263
250 264
def _urlerrorreason(urlerror):
    '''Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    '''
    inst = urlerror

    # Unwrap the underlying exception when URLError merely wraps one.
    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason

    # NOTE(review): the attribute name is passed as bytes; confirm that
    # util.safehasattr accepts bytes attribute names on Python 3.
    if util.safehasattr(inst, b'reason'):
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason
    elif getattr(inst, "strerror", None):
        return encoding.strtolocal(inst.strerror)
    else:
        # Last resort: stringify the whole URLError.
        return stringutil.forcebytestr(urlerror)
274 288
275 289
class lfsauthhandler(util.urlreq.basehandler):
    """urllib handler that restricts authentication to HTTP Basic."""

    handler_order = 480  # Before HTTPDigestAuthHandler (== 490)

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication. No authentication is also acceptable.
        """
        authreq = headers.get('www-authenticate', None)
        if authreq:
            # The first token of WWW-Authenticate is the scheme name.
            scheme = authreq.split()[0]

            if scheme.lower() != 'basic':
                msg = _(b'the server must support Basic Authentication')
                raise util.urlerr.httperror(
                    req.get_full_url(),
                    code,
                    encoding.strfromlocal(msg),
                    headers,
                    fp,
                )
        # Returning None lets later handlers (e.g. basic auth) retry.
        return None
297 311
298 312
class _gitlfsremote(object):
    """Remote blobstore speaking the Git-LFS batch and basic transfer APIs."""

    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip(b'/')
        useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
        if not useragent:
            useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        self.urlopener.add_handler(lfsauthhandler())
        self.retry = ui.configint(b'lfs', b'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(_deduplicate(pointers), fromstore, b'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blostore."""
        self._batch(_deduplicate(pointers), tostore, b'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [
            {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
            for p in pointers
        ]
        requestdata = pycompat.bytesurl(
            json.dumps(
                {'objects': objects, 'operation': pycompat.strurl(action),}
            )
        )
        url = b'%s/objects/batch' % self.baseurl
        batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
                rawjson = rsp.read()
        except util.urlerr.httperror as ex:
            hints = {
                400: _(
                    b'check that lfs serving is enabled on %s and "%s" is '
                    b'supported'
                )
                % (self.baseurl, action),
                404: _(b'the "lfs.url" config may be used to override %s')
                % self.baseurl,
            }
            hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
                hint=hint,
            )
        except util.urlerr.urlerror as ex:
            hint = (
                _(b'the "lfs.url" config may be used to override %s')
                % self.baseurl
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        try:
            response = pycompat.json_loads(rawjson)
        except ValueError:
            # rawjson is the raw HTTP body and is already bytes on Python 3,
            # where bytes has no .encode(); forcebytestr() is a safe coercion
            # on both Python 2 and 3. (The previous .encode('utf-8') call
            # raised AttributeError here on py3, masking this error.)
            raise LfsRemoteError(
                _(b'LFS server returns invalid JSON: %s')
                % stringutil.forcebytestr(rawjson)
            )

        if self.ui.debugflag:
            self.ui.debug(b'Status: %d\n' % rsp.status)
            # lfs-test-server and hg serve return headers in different order
            headers = pycompat.bytestr(rsp.info()).strip()
            self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

            if 'objects' in response:
                # Sort for stable debug output across server implementations.
                response['objects'] = sorted(
                    response['objects'], key=lambda p: p['oid']
                )
            self.ui.debug(
                b'%s\n'
                % pycompat.bytesurl(
                    json.dumps(
                        response,
                        indent=2,
                        separators=('', ': '),
                        sort_keys=True,
                    )
                )
            )

        def encodestr(x):
            if isinstance(x, pycompat.unicode):
                return x.encode('utf-8')
            return x

        # Convert all unicode leaves back to bytes for internal consumption.
        return pycompat.rapply(encodestr, response)

    def _checkforservererror(self, pointers, responses, action):
        """Scans errors from objects

        Raises LfsRemoteError if any objects have an error"""
        for response in responses:
            # The server should return 404 when objects cannot be found. Some
            # server implementation (ex. lfs-test-server) does not set "error"
            # but just removes "download" from "actions". Treat that case
            # as the same as 404 error.
            if b'error' not in response:
                if action == b'download' and action not in response.get(
                    b'actions', []
                ):
                    code = 404
                else:
                    continue
            else:
                # An error dict without a code doesn't make much sense, so
                # treat as a server error.
                code = response.get(b'error').get(b'code', 500)

            ptrmap = {p.oid(): p for p in pointers}
            p = ptrmap.get(response[b'oid'], None)
            if p:
                filename = getattr(p, 'filename', b'unknown')
                errors = {
                    404: b'The object does not exist',
                    410: b'The object was removed by the owner',
                    422: b'Validation error',
                    500: b'Internal server error',
                }
                msg = errors.get(code, b'status code %d' % code)
                raise LfsRemoteError(
                    _(b'LFS server error for "%s": %s') % (filename, msg)
                )
            else:
                raise LfsRemoteError(
                    _(b'LFS server error. Unsolicited response for oid %s')
                    % response[b'oid']
                )

    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get(b'objects', [])
        self._checkforservererror(pointers, objects, action)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [
            o for o in objects if action in o.get(b'actions', [])
        ]

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = obj[b'oid']
        href = obj[b'actions'][action].get(b'href')
        headers = obj[b'actions'][action].get(b'header', {}).items()

        request = util.urlreq.request(pycompat.strurl(href))
        if action == b'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(
                    _(b'detected corrupt lfs object: %s') % oid,
                    hint=_(b'run hg verify'),
                )
            request.data = filewithprogress(localstore.open(oid), None)
            request.get_method = lambda: r'PUT'
            request.add_header('Content-Type', 'application/octet-stream')
            request.add_header('Content-Length', len(request.data))

        for k, v in headers:
            request.add_header(pycompat.strurl(k), pycompat.strurl(v))

        response = b''
        try:
            with contextlib.closing(self.urlopener.open(request)) as res:
                # NOTE(review): on Python 3, HTTPMessage.get() with a *bytes*
                # key may always return None, which would silently disable the
                # length check in download() — confirm and use a str key if so.
                contentlength = res.info().get(b"content-length")
                ui = self.ui  # Shorten debug lines
                if self.ui.debugflag:
                    ui.debug(b'Status: %d\n' % res.status)
                    # lfs-test-server and hg serve return headers in different
                    # order
                    headers = pycompat.bytestr(res.info()).strip()
                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

                if action == b'download':
                    # If downloading blobs, store downloaded data to local
                    # blobstore
                    localstore.download(oid, res, contentlength)
                else:
                    while True:
                        data = res.read(1048576)
                        if not data:
                            break
                        response += data
                    if response:
                        ui.debug(b'lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug(
                    b'%s: %s\n' % (oid, ex.read())
                )  # XXX: also bytes?
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
                % (stringutil.forcebytestr(ex), oid, action)
            )
        except util.urlerr.urlerror as ex:
            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
                util.urllibcompat.getfullurl(request)
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )

    def _batch(self, pointers, localstore, action):
        """Transfer all ``pointers`` between the remote and ``localstore``,
        reporting progress and retrying individual transfers on socket
        errors up to ``lfs.retry`` times."""
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            # Generator yielding (1, oid) per completed transfer; shape is
            # dictated by the worker API below.
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        with self.ui.makeprogress(
            topic, unit=_(b"bytes"), total=total
        ) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )

    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda: None)()
622 637
623 638
class _dummyremote(object):
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        fullpath = repo.vfs.join(b'lfs', url.path)
        self.vfs = lfsvfs(fullpath)

    def writebatch(self, pointers, fromstore):
        # Read with verification so corrupt blobs are never propagated.
        for p in _deduplicate(pointers):
            content = fromstore.read(p.oid(), verify=True)
            with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
                fp.write(content)

    def readbatch(self, pointers, tostore):
        for p in _deduplicate(pointers):
            with self.vfs(p.oid(), b'rb') as fp:
                # No Content-Length is available from a plain file source.
                tostore.download(p.oid(), fp, None)
641 656
642 657
643 658 class _nullremote(object):
644 659 """Null store storing blobs to /dev/null."""
645 660
646 661 def __init__(self, repo, url):
647 662 pass
648 663
649 664 def writebatch(self, pointers, fromstore):
650 665 pass
651 666
652 667 def readbatch(self, pointers, tostore):
653 668 pass
654 669
655 670
class _promptremote(object):
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        # No remote could be determined, so any transfer is a config error.
        raise error.Abort(_(b'lfs.url needs to be configured'))
670 685
671 686
# Map of URL scheme -> remote store implementation. The ``None`` key handles
# the case where no lfs.url is configured and no default could be inferred.
_storemap = {
    b'https': _gitlfsremote,
    b'http': _gitlfsremote,
    b'file': _dummyremote,
    b'null': _nullremote,
    None: _promptremote,
}
679 694
680 695
def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list"""
    # sortdict preserves insertion order; the last pointer for an oid wins.
    reduced = util.sortdict()
    for p in pointers:
        reduced[p.oid()] = p
    return reduced.values()
687 702
688 703
def _verify(oid, content):
    """Raise LfsCorruptionError if ``content`` doesn't sha256-hash to ``oid``."""
    realoid = node.hex(hashlib.sha256(content).digest())
    if realoid != oid:
        raise LfsCorruptionError(
            _(b'detected corrupt lfs object: %s') % oid,
            hint=_(b'run hg verify'),
        )
696 711
697 712
def remote(repo, remote=None):
    """remotestore factory. return a store in _storemap depending on config

    If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
    infer the endpoint, based on the remote repository using the same path
    adjustments as git. As an extension, 'http' is supported as well so that
    ``hg serve`` works out of the box.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
    """
    lfsurl = repo.ui.config(b'lfs', b'url')
    url = util.url(lfsurl or b'')
    if lfsurl is None:
        if remote:
            path = remote
        elif util.safehasattr(repo, b'_subtoppath'):
            # The pull command sets this during the optional update phase, which
            # tells exactly where the pull originated, whether 'paths.default'
            # or explicit.
            path = repo._subtoppath
        else:
            # TODO: investigate 'paths.remote:lfsurl' style path customization,
            # and fall back to inferring from 'paths.remote' if unspecified.
            path = repo.ui.config(b'paths', b'default') or b''

        defaulturl = util.url(path)

        # TODO: support local paths as well.
        # TODO: consider the ssh -> https transformation that git applies
        if defaulturl.scheme in (b'http', b'https'):
            # Append a separator only when the path doesn't already end with
            # one. (The previous test sliced the last character *off*
            # (``[:-1]``) instead of selecting it, so paths already ending in
            # '/' grew a double slash and two-character paths like b'/x'
            # skipped the separator entirely.)
            if defaulturl.path and defaulturl.path[-1:] != b'/':
                defaulturl.path += b'/'
            defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'

            url = util.url(bytes(defaulturl))
            repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)

    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)
739 754
740 755
class LfsRemoteError(error.StorageError):
    """Raised for errors communicating with a remote LFS server."""

    pass
743 758
744 759
class LfsCorruptionError(error.Abort):
    """Raised when a corrupt blob is detected, aborting an operation

    It exists to allow specialized handling on the server side."""
    # NOTE: deliberately subclasses error.Abort (not StorageError), so normal
    # client-side handling treats it as a hard abort.
@@ -1,370 +1,370 b''
1 1 # wireprotolfsserver.py - lfs protocol server side implementation
2 2 #
3 3 # Copyright 2018 Matt Harbison <matt_harbison@yahoo.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import datetime
11 11 import errno
12 12 import json
13 13 import traceback
14 14
15 15 from mercurial.hgweb import common as hgwebcommon
16 16
17 17 from mercurial import (
18 18 exthelper,
19 19 pycompat,
20 20 util,
21 21 wireprotoserver,
22 22 )
23 23
24 24 from . import blobstore
25 25
# Friendly local aliases for the HTTP status codes used by the LFS protocol
# handlers in this module.
HTTP_OK = hgwebcommon.HTTP_OK
HTTP_CREATED = hgwebcommon.HTTP_CREATED
HTTP_BAD_REQUEST = hgwebcommon.HTTP_BAD_REQUEST
HTTP_NOT_FOUND = hgwebcommon.HTTP_NOT_FOUND
HTTP_METHOD_NOT_ALLOWED = hgwebcommon.HTTP_METHOD_NOT_ALLOWED
HTTP_NOT_ACCEPTABLE = hgwebcommon.HTTP_NOT_ACCEPTABLE
HTTP_UNSUPPORTED_MEDIA_TYPE = hgwebcommon.HTTP_UNSUPPORTED_MEDIA_TYPE

# Extension helper used to register the wireprotoserver wrapper below.
eh = exthelper.exthelper()
35 35
36 36
@eh.wrapfunction(wireprotoserver, b'handlewsgirequest')
def handlewsgirequest(orig, rctx, req, res, checkperm):
    """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS
    request if it is left unprocessed by the wrapped method.
    """
    if orig(rctx, req, res, checkperm):
        return True

    # LFS serving must be explicitly enabled on this repo.
    if not rctx.repo.ui.configbool(b'experimental', b'lfs.serve'):
        return False

    # No local blobstore means the lfs extension isn't set up on this repo.
    if not util.safehasattr(rctx.repo.svfs, 'lfslocalblobstore'):
        return False

    if not req.dispatchpath:
        return False

    try:
        if req.dispatchpath == b'.git/info/lfs/objects/batch':
            checkperm(rctx, req, b'pull')
            return _processbatchrequest(rctx.repo, req, res)
        # TODO: reserve and use a path in the proposed http wireprotocol /api/
        # namespace?
        elif req.dispatchpath.startswith(b'.hg/lfs/objects'):
            return _processbasictransfer(
                rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
            )
        return False
    except hgwebcommon.ErrorResponse as e:
        # XXX: copied from the handler surrounding wireprotoserver._callhttp()
        # in the wrapped function. Should this be moved back to hgweb to
        # be a common handler?
        for k, v in e.headers:
            res.headers[k] = v
        res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
        res.setbodybytes(b'0\n%s\n' % pycompat.bytestr(e))
        return True
74 74
75 75
def _sethttperror(res, code, message=None):
    """Fill ``res`` with a plain-text error response for HTTP status ``code``."""
    res.status = hgwebcommon.statusmessage(code, message=message)
    res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
    res.setbodybytes(b'')
80 80
81 81
def _logexception(req):
    """Write information about the current exception to wsgi.errors."""
    # Intended to be called from inside an ``except`` block, so that
    # traceback.format_exc() has an active exception to render.
    tb = pycompat.sysbytes(traceback.format_exc())
    errorlog = req.rawenv[b'wsgi.errors']

    uri = b''
    if req.apppath:
        uri += req.apppath
    uri += b'/' + req.dispatchpath

    errorlog.write(
        b"Exception happened while processing request '%s':\n%s" % (uri, tb)
    )
95 95
96 96
def _processbatchrequest(repo, req, res):
    """Handle a request for the Batch API, which is the gateway to granting file
    access.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md

    Always returns True (the request was handled); errors are reported by
    filling ``res`` with the appropriate HTTP status.
    """

    # Mercurial client request:
    #
    #   HOST: localhost:$HGPORT
    #   ACCEPT: application/vnd.git-lfs+json
    #   ACCEPT-ENCODING: identity
    #   USER-AGENT: git-lfs/2.3.4 (Mercurial 4.5.2+1114-f48b9754f04c+20180316)
    #   Content-Length: 125
    #   Content-Type: application/vnd.git-lfs+json
    #
    #   {
    #     "objects": [
    #       {
    #         "oid": "31cf...8e5b"
    #         "size": 12
    #       }
    #     ]
    #     "operation": "upload"
    #   }

    if req.method != b'POST':
        _sethttperror(res, HTTP_METHOD_NOT_ALLOWED)
        return True

    if req.headers[b'Content-Type'] != b'application/vnd.git-lfs+json':
        _sethttperror(res, HTTP_UNSUPPORTED_MEDIA_TYPE)
        return True

    if req.headers[b'Accept'] != b'application/vnd.git-lfs+json':
        _sethttperror(res, HTTP_NOT_ACCEPTABLE)
        return True

    # XXX: specify an encoding?
    lfsreq = pycompat.json_loads(req.bodyfh.read())

    # If no transfer handlers are explicitly requested, 'basic' is assumed.
    if 'basic' not in lfsreq.get('transfers', ['basic']):
        _sethttperror(
            res,
            HTTP_BAD_REQUEST,
            b'Only the basic LFS transfer handler is supported',
        )
        return True

    operation = lfsreq.get('operation')
    operation = pycompat.bytestr(operation)

    if operation not in (b'upload', b'download'):
        _sethttperror(
            res,
            HTTP_BAD_REQUEST,
            b'Unsupported LFS transfer operation: %s' % operation,
        )
        return True

    localstore = repo.svfs.lfslocalblobstore

    # Materialize the generator directly; the previous identity comprehension
    # ([p for p in gen]) was a redundant copy of list().
    objects = list(
        _batchresponseobjects(
            req, lfsreq.get('objects', []), operation, localstore
        )
    )

    rsp = {
        'transfer': 'basic',
        'objects': objects,
    }

    res.status = hgwebcommon.statusmessage(HTTP_OK)
    res.headers[b'Content-Type'] = b'application/vnd.git-lfs+json'
    res.setbodybytes(pycompat.bytestr(json.dumps(rsp)))

    return True
177 177
178 178
def _batchresponseobjects(req, objects, action, store):
    """Yield one dictionary of attributes for the Batch API response for each
    object in the list.

    req: The parsedrequest for the Batch API request
    objects: The list of objects in the Batch API object request list
    action: 'upload' or 'download'
    store: The local blob store for servicing requests"""

    # Successful lfs-test-server response to solicit an upload:
    # {
    #    u'objects': [{
    #       u'size': 12,
    #       u'oid': u'31cf...8e5b',
    #       u'actions': {
    #           u'upload': {
    #               u'href': u'http://localhost:$HGPORT/objects/31cf...8e5b',
    #               u'expires_at': u'0001-01-01T00:00:00Z',
    #               u'header': {
    #                   u'Accept': u'application/vnd.git-lfs'
    #               }
    #           }
    #       }
    #    }]
    # }

    # TODO: Sort out the expires_at/expires_in/authenticated keys.

    for obj in objects:
        # Convert unicode to ASCII to create a filesystem path
        soid = obj.get('oid')
        oid = soid.encode('ascii')
        rsp = {
            'oid': soid,
            'size': obj.get('size'),  # XXX: should this check the local size?
            # 'authenticated': True,
        }

        exists = True
        verifies = False

        # Verify an existing file on the upload request, so that the client is
        # solicited to re-upload if it corrupt locally.  Download requests are
        # also verified, so the error can be flagged in the Batch API response.
        # (Maybe we can use this to short circuit the download for `hg verify`,
        # IFF the client can assert that the remote end is an hg server.)
        # Otherwise, it's potentially overkill on download, since it is also
        # verified as the file is streamed to the caller.
        try:
            verifies = store.verify(oid)
            if verifies and action == b'upload':
                # The client will skip this upload, but make sure it remains
                # available locally.
                store.linkfromusercache(oid)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                _logexception(req)

                rsp['error'] = {
                    'code': 500,
                    # Fixed typo: was 'Internal Server Server'
                    'message': inst.strerror or 'Internal Server Error',
                }
                yield rsp
                continue

            # ENOENT means the blob simply isn't stored here yet.
            exists = False

        # Items are always listed for downloads.  They are dropped for uploads
        # IFF they already exist locally.
        if action == b'download':
            if not exists:
                rsp['error'] = {
                    'code': 404,
                    'message': "The object does not exist",
                }
                yield rsp
                continue

            elif not verifies:
                rsp['error'] = {
                    'code': 422,  # XXX: is this the right code?
                    'message': "The object is corrupt",
                }
                yield rsp
                continue

        elif verifies:
            yield rsp  # Skip 'actions': already uploaded
            continue

        expiresat = datetime.datetime.now() + datetime.timedelta(minutes=10)

        def _buildheader():
            # The spec doesn't mention the Accept header here, but avoid
            # a gratuitous deviation from lfs-test-server in the test
            # output.
            hdr = {'Accept': 'application/vnd.git-lfs'}

            auth = req.headers.get(b'Authorization', b'')
            if auth.startswith(b'Basic '):
                hdr['Authorization'] = pycompat.strurl(auth)

            return hdr

        rsp['actions'] = {
            '%s'
            % pycompat.strurl(action): {
                'href': pycompat.strurl(
                    b'%s%s/.hg/lfs/objects/%s' % (req.baseurl, req.apppath, oid)
                ),
                # datetime.isoformat() doesn't include the 'Z' suffix
                "expires_at": expiresat.strftime('%Y-%m-%dT%H:%M:%SZ'),
                'header': _buildheader(),
            }
        }

        yield rsp
296 296
297 297
def _processbasictransfer(repo, req, res, checkperm):
    """Handle a single file upload (PUT) or download (GET) action for the Basic
    Transfer Adapter.

    After determining if the request is for an upload or download, the access
    must be checked by calling ``checkperm()`` with either 'pull' or 'upload'
    before accessing the files.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/basic-transfers.md
    """

    # A valid transfer URL has exactly 4 dispatch components
    # (``.hg/lfs/objects/<oid>``).  Check the length BEFORE indexing
    # into the parts below, so a malformed path yields a 404 rather
    # than an IndexError.  (Previously ``dispatchparts[-1]`` was read
    # before this guard ran.)
    if len(req.dispatchparts) != 4:
        _sethttperror(res, HTTP_NOT_FOUND)
        return True

    method = req.method
    oid = req.dispatchparts[-1]
    localstore = repo.svfs.lfslocalblobstore

    if method == b'PUT':
        checkperm(b'upload')

        # TODO: verify Content-Type?

        existed = localstore.has(oid)

        # TODO: how to handle timeouts?  The body proxy handles limiting to
        #       Content-Length, but what happens if a client sends less than it
        #       says it will?

        statusmessage = hgwebcommon.statusmessage
        try:
            # Pass the advertised Content-Length so the store can detect a
            # short/truncated upload.
            localstore.download(oid, req.bodyfh, req.headers[b'Content-Length'])
            res.status = statusmessage(HTTP_OK if existed else HTTP_CREATED)
        except blobstore.LfsCorruptionError:
            _logexception(req)

            # XXX: Is this the right code?
            res.status = statusmessage(422, b'corrupt blob')

        # There's no payload here, but this is the header that lfs-test-server
        # sends back.  This eliminates some gratuitous test output conditionals.
        res.headers[b'Content-Type'] = b'text/plain; charset=utf-8'
        res.setbodybytes(b'')

        return True
    elif method == b'GET':
        checkperm(b'pull')

        res.status = hgwebcommon.statusmessage(HTTP_OK)
        res.headers[b'Content-Type'] = b'application/octet-stream'

        try:
            # TODO: figure out how to send back the file in chunks, instead of
            #       reading the whole thing.  (Also figure out how to send back
            #       an error status if an IOError occurs after a partial write
            #       in that case.  Here, everything is read before starting.)
            res.setbodybytes(localstore.read(oid))
        except blobstore.LfsCorruptionError:
            _logexception(req)

            # XXX: Is this the right code?
            res.status = hgwebcommon.statusmessage(422, b'corrupt blob')
            res.setbodybytes(b'')

        return True
    else:
        _sethttperror(
            res,
            HTTP_METHOD_NOT_ALLOWED,
            message=b'Unsupported LFS transfer method: %s' % method,
        )
        return True
@@ -1,508 +1,508 b''
1 1 #require serve no-reposimplestore no-chg
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > lfs=
6 6 > [lfs]
7 7 > track=all()
8 8 > [web]
9 9 > push_ssl = False
10 10 > allow-push = *
11 11 > EOF
12 12
13 13 Serving LFS files can experimentally be turned off. The long term solution is
14 14 to support the 'verify' action in both client and server, so that the server can
15 15 tell the client to store files elsewhere.
16 16
17 17 $ hg init server
18 18 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
19 19 > --config experimental.lfs.serve=False -R server serve -d \
20 20 > -p $HGPORT --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
21 21 $ cat hg.pid >> $DAEMON_PIDS
22 22
23 23 Uploads fail...
24 24
25 25 $ hg init client
26 26 $ echo 'this-is-an-lfs-file' > client/lfs.bin
27 27 $ hg -R client ci -Am 'initial commit'
28 28 adding lfs.bin
29 29 $ hg -R client push http://localhost:$HGPORT
30 30 pushing to http://localhost:$HGPORT/
31 31 searching for changes
32 32 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
33 33 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "upload" is supported)
34 34 [255]
35 35
36 36 ... so do a local push to make the data available. Remove the blob from the
37 37 default cache, so it attempts to download.
38 38 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
39 39 > --config "lfs.url=null://" \
40 40 > -R client push -q server
41 41 $ mv `hg config lfs.usercache` $TESTTMP/servercache
42 42
43 43 Downloads fail...
44 44
45 45 $ hg clone http://localhost:$HGPORT httpclone
46 46 (remote is using large file support (lfs); lfs will be enabled for this repository)
47 47 requesting all changes
48 48 adding changesets
49 49 adding manifests
50 50 adding file changes
51 51 added 1 changesets with 1 changes to 1 files
52 52 new changesets 525251863cad
53 53 updating to branch default
54 54 abort: LFS HTTP error: HTTP Error 400: no such method: .git!
55 55 (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "download" is supported)
56 56 [255]
57 57
58 58 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
59 59
60 60 $ cat $TESTTMP/access.log $TESTTMP/errors.log
61 61 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
62 62 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
63 63 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
64 64 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
65 65 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
66 66 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
67 67 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
68 68 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
69 69 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
70 70
71 71 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
72 72 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
73 73 > -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
74 74 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
75 75 $ cat hg.pid >> $DAEMON_PIDS
76 76
77 77 Reasonable hint for a misconfigured blob server
78 78
79 79 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT/missing
80 80 abort: LFS HTTP error: HTTP Error 404: Not Found!
81 81 (the "lfs.url" config may be used to override http://localhost:$HGPORT/missing)
82 82 [255]
83 83
84 84 $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT2/missing
85 85 abort: LFS error: *onnection *refused*! (glob) (?)
86 86 abort: LFS error: $EADDRNOTAVAIL$! (glob) (?)
87 87 abort: LFS error: No route to host! (?)
88 88 (the "lfs.url" config may be used to override http://localhost:$HGPORT2/missing)
89 89 [255]
90 90
91 91 Blob URIs are correct when --prefix is used
92 92
93 93 $ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
94 94 using http://localhost:$HGPORT/subdir/mount/point
95 95 sending capabilities command
96 96 (remote is using large file support (lfs); lfs will be enabled for this repository)
97 97 query 1; heads
98 98 sending batch command
99 99 requesting all changes
100 100 sending getbundle command
101 101 bundle2-input-bundle: with-transaction
102 102 bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
103 103 adding changesets
104 104 add changeset 525251863cad
105 105 adding manifests
106 106 adding file changes
107 107 adding lfs.bin revisions
108 108 bundle2-input-part: total payload size 648
109 109 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
110 110 bundle2-input-part: "phase-heads" supported
111 111 bundle2-input-part: total payload size 24
112 112 bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
113 113 bundle2-input-part: total payload size 39
114 114 bundle2-input-bundle: 4 parts total
115 115 checking for updated bookmarks
116 116 updating the branch cache
117 117 added 1 changesets with 1 changes to 1 files
118 118 new changesets 525251863cad
119 119 updating to branch default
120 120 resolving manifests
121 121 branchmerge: False, force: False, partial: False
122 122 ancestor: 000000000000, local: 000000000000+, remote: 525251863cad
123 123 lfs: assuming remote store: http://localhost:$HGPORT/subdir/mount/point/.git/info/lfs
124 124 Status: 200
125 125 Content-Length: 371
126 126 Content-Type: application/vnd.git-lfs+json
127 127 Date: $HTTP_DATE$
128 128 Server: testing stub value
129 129 {
130 130 "objects": [
131 131 {
132 132 "actions": {
133 133 "download": {
134 134 "expires_at": "$ISO_8601_DATE_TIME$"
135 135 "header": {
136 136 "Accept": "application/vnd.git-lfs"
137 137 }
138 138 "href": "http://localhost:$HGPORT/subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
139 139 }
140 140 }
141 141 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
142 142 "size": 20
143 143 }
144 144 ]
145 145 "transfer": "basic"
146 146 }
147 147 lfs: downloading f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e (20 bytes)
148 148 Status: 200
149 149 Content-Length: 20
150 150 Content-Type: application/octet-stream
151 151 Date: $HTTP_DATE$
152 152 Server: testing stub value
153 153 lfs: adding f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e to the usercache
154 154 lfs: processed: f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
155 155 lfs: downloaded 1 files (20 bytes)
156 156 lfs.bin: remote created -> g
157 157 getting lfs.bin
158 158 lfs: found f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e in the local lfs store
159 159 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
160 160 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
161 161
162 162 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
163 163
164 164 $ cat $TESTTMP/access.log $TESTTMP/errors.log
165 165 $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
166 166 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
167 167 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
168 168 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
169 169 $LOCALIP - - [$LOGDATE$] "POST /subdir/mount/point/.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
170 170 $LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point/.hg/lfs/objects/f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e HTTP/1.1" 200 - (glob)
171 171
172 172 Blobs that already exist in the usercache are linked into the repo store, even
173 173 though the client doesn't send the blob.
174 174
175 175 $ hg init server2
176 176 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R server2 serve -d \
177 177 > -p $HGPORT --pid-file=hg.pid \
178 178 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
179 179 $ cat hg.pid >> $DAEMON_PIDS
180 180
181 181 $ hg --config "lfs.usercache=$TESTTMP/servercache" -R cloned2 --debug \
182 182 > push http://localhost:$HGPORT | grep '^[{} ]'
183 183 {
184 184 "objects": [
185 185 {
186 186 "oid": "f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e"
187 187 "size": 20
188 188 }
189 189 ]
190 190 "transfer": "basic"
191 191 }
192 192 $ find server2/.hg/store/lfs/objects | sort
193 193 server2/.hg/store/lfs/objects
194 194 server2/.hg/store/lfs/objects/f0
195 195 server2/.hg/store/lfs/objects/f0/3217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e
196 196 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
197 197 $ cat $TESTTMP/errors.log
198 198
199 199 $ cat >> $TESTTMP/lfsstoreerror.py <<EOF
200 200 > import errno
201 201 > from hgext.lfs import blobstore
202 202 >
203 203 > _numverifies = 0
204 204 > _readerr = True
205 205 >
206 206 > def reposetup(ui, repo):
207 207 > # Nothing to do with a remote repo
208 208 > if not repo.local():
209 209 > return
210 210 >
211 211 > store = repo.svfs.lfslocalblobstore
212 212 > class badstore(store.__class__):
213 > def download(self, oid, src):
213 > def download(self, oid, src, contentlength):
214 214 > '''Called in the server to handle reading from the client in a
215 215 > PUT request.'''
216 216 > origread = src.read
217 217 > def _badread(nbytes):
218 218 > # Simulate bad data/checksum failure from the client
219 219 > return b'0' * len(origread(nbytes))
220 220 > src.read = _badread
221 > super(badstore, self).download(oid, src)
221 > super(badstore, self).download(oid, src, contentlength)
222 222 >
223 223 > def _read(self, vfs, oid, verify):
224 224 > '''Called in the server to read data for a GET request, and then
225 225 > calls self._verify() on it before returning.'''
226 226 > global _readerr
227 227 > # One time simulation of a read error
228 228 > if _readerr:
229 229 > _readerr = False
230 230 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
231 231 > # Simulate corrupt content on client download
232 232 > blobstore._verify(oid, b'dummy content')
233 233 >
234 234 > def verify(self, oid):
235 235 > '''Called in the server to populate the Batch API response,
236 236 > letting the client re-upload if the file is corrupt.'''
237 237 > # Fail verify in Batch API for one clone command and one push
238 238 > # command with an IOError. Then let it through to access other
239 239 > # functions. Checksum failure is tested elsewhere.
240 240 > global _numverifies
241 241 > _numverifies += 1
242 242 > if _numverifies <= 2:
243 243 > raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
244 244 > return super(badstore, self).verify(oid)
245 245 >
246 246 > store.__class__ = badstore
247 247 > EOF
248 248
249 249 $ rm -rf `hg config lfs.usercache`
250 250 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
251 251 $ hg --config "lfs.usercache=$TESTTMP/servercache" \
252 252 > --config extensions.lfsstoreerror=$TESTTMP/lfsstoreerror.py \
253 253 > -R server serve -d \
254 254 > -p $HGPORT1 --pid-file=hg.pid -A $TESTTMP/access.log -E $TESTTMP/errors.log
255 255 $ cat hg.pid >> $DAEMON_PIDS
256 256
257 257 Test an I/O error in localstore.verify() (Batch API) with GET
258 258
259 259 $ hg clone http://localhost:$HGPORT1 httpclone2
260 260 (remote is using large file support (lfs); lfs will be enabled for this repository)
261 261 requesting all changes
262 262 adding changesets
263 263 adding manifests
264 264 adding file changes
265 265 added 1 changesets with 1 changes to 1 files
266 266 new changesets 525251863cad
267 267 updating to branch default
268 268 abort: LFS server error for "lfs.bin": Internal server error!
269 269 [255]
270 270
271 271 Test an I/O error in localstore.verify() (Batch API) with PUT
272 272
273 273 $ echo foo > client/lfs.bin
274 274 $ hg -R client ci -m 'mod lfs'
275 275 $ hg -R client push http://localhost:$HGPORT1
276 276 pushing to http://localhost:$HGPORT1/
277 277 searching for changes
278 278 abort: LFS server error for "unknown": Internal server error!
279 279 [255]
280 280 TODO: figure out how to associate the file name in the error above
281 281
282 282 Test a bad checksum sent by the client in the transfer API
283 283
284 284 $ hg -R client push http://localhost:$HGPORT1
285 285 pushing to http://localhost:$HGPORT1/
286 286 searching for changes
287 287 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
288 288 [255]
289 289
290 290 $ echo 'test lfs file' > server/lfs3.bin
291 291 $ hg --config experimental.lfs.disableusercache=True \
292 292 > -R server ci -Aqm 'another lfs file'
293 293 $ hg -R client pull -q http://localhost:$HGPORT1
294 294
295 295 Test an I/O error during the processing of the GET request
296 296
297 297 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
298 298 > -R client update -r tip
299 299 abort: LFS HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
300 300 [255]
301 301
302 302 Test a checksum failure during the processing of the GET request
303 303
304 304 $ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
305 305 > -R client update -r tip
306 306 abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
307 307 [255]
308 308
309 309 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
310 310
311 311 $ cat $TESTTMP/access.log
312 312 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
313 313 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
314 314 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
315 315 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
316 316 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
317 317 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
318 318 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
319 319 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
320 320 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
321 321 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
322 322 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
323 323 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
324 324 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
325 325 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
326 326 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
327 327 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
328 328 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
329 329 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
330 330 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c HTTP/1.1" 422 - (glob)
331 331 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
332 332 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D392c05922088bacf8e68a6939b480017afbf245d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
333 333 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=525251863cad618e55d483555f3d00a2ca99597e&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
334 334 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
335 335 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 500 - (glob)
336 336 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
337 337 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 422 - (glob)
338 338
339 339 $ grep -v ' File "' $TESTTMP/errors.log
340 340 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
341 341 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
342 342 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
343 343 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
344 344 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno *] f03217a32529a28a42d03b1244fe09b6e0f9fd06d7b966d4d50567be2abe6c0e: I/O error (glob)
345 345 $LOCALIP - - [$ERRDATE$] HG error: (glob)
346 346 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.git/info/lfs/objects/batch': (glob)
347 347 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
348 348 $LOCALIP - - [$ERRDATE$] HG error: verifies = store.verify(oid) (glob)
349 349 $LOCALIP - - [$ERRDATE$] HG error: raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8")) (glob)
350 350 $LOCALIP - - [$ERRDATE$] HG error: *Error: [Errno *] b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c: I/O error (glob)
351 351 $LOCALIP - - [$ERRDATE$] HG error: (glob)
352 352 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c': (glob)
353 353 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
354 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh) (glob)
355 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src) (glob)
354 $LOCALIP - - [$ERRDATE$] HG error: localstore.download(oid, req.bodyfh, req.headers[b'Content-Length'])
355 $LOCALIP - - [$ERRDATE$] HG error: super(badstore, self).download(oid, src, contentlength)
356 356 $LOCALIP - - [$ERRDATE$] HG error: _(b'corrupt remote lfs object: %s') % oid (glob)
357 357 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (no-py3 !)
358 358 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (py3 !)
359 359 $LOCALIP - - [$ERRDATE$] HG error: (glob)
360 360 $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
361 361 Traceback (most recent call last):
362 362 self.do_write()
363 363 self.do_hgweb()
364 364 for chunk in self.server.application(env, self._start_response):
365 365 for r in self._runwsgi(req, res, repo):
366 366 rctx, req, res, self.check_perm
367 367 return func(*(args + a), **kw) (no-py3 !)
368 368 rctx.repo, req, res, lambda perm: checkperm(rctx, req, perm)
369 369 res.setbodybytes(localstore.read(oid))
370 370 blob = self._read(self.vfs, oid, verify)
371 371 raise IOError(errno.EIO, r'%s: I/O error' % oid.decode("utf-8"))
372 372 *Error: [Errno *] 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d: I/O error (glob)
373 373
374 374 $LOCALIP - - [$ERRDATE$] HG error: Exception happened while processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
375 375 $LOCALIP - - [$ERRDATE$] HG error: Traceback (most recent call last): (glob)
376 376 $LOCALIP - - [$ERRDATE$] HG error: res.setbodybytes(localstore.read(oid)) (glob)
377 377 $LOCALIP - - [$ERRDATE$] HG error: blob = self._read(self.vfs, oid, verify) (glob)
378 378 $LOCALIP - - [$ERRDATE$] HG error: blobstore._verify(oid, b'dummy content') (glob)
379 379 $LOCALIP - - [$ERRDATE$] HG error: hint=_(b'run hg verify'), (glob)
380 380 $LOCALIP - - [$ERRDATE$] HG error: LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (no-py3 !)
381 381 $LOCALIP - - [$ERRDATE$] HG error: hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (py3 !)
382 382 $LOCALIP - - [$ERRDATE$] HG error: (glob)
383 383
384 384 Basic Authorization headers are returned by the Batch API, and sent back with
385 385 the GET/PUT request.
386 386
387 387 $ rm -f $TESTTMP/access.log $TESTTMP/errors.log
388 388
389 389 $ cat >> $HGRCPATH << EOF
390 390 > [experimental]
391 391 > lfs.disableusercache = True
392 392 > [auth]
393 393 > l.schemes=http
394 394 > l.prefix=lo
395 395 > l.username=user
396 396 > l.password=pass
397 397 > EOF
398 398
399 399 $ hg --config extensions.x=$TESTDIR/httpserverauth.py \
400 400 > -R server serve -d -p $HGPORT1 --pid-file=hg.pid \
401 401 > -A $TESTTMP/access.log -E $TESTTMP/errors.log
402 402 $ mv hg.pid $DAEMON_PIDS
403 403
404 404 $ hg clone --debug http://localhost:$HGPORT1 auth_clone | egrep '^[{}]| '
405 405 {
406 406 "objects": [
407 407 {
408 408 "actions": {
409 409 "download": {
410 410 "expires_at": "$ISO_8601_DATE_TIME$"
411 411 "header": {
412 412 "Accept": "application/vnd.git-lfs"
413 413 "Authorization": "Basic dXNlcjpwYXNz"
414 414 }
415 415 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
416 416 }
417 417 }
418 418 "oid": "276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d"
419 419 "size": 14
420 420 }
421 421 ]
422 422 "transfer": "basic"
423 423 }
424 424
425 425 $ echo 'another blob' > auth_clone/lfs.blob
426 426 $ hg -R auth_clone ci -Aqm 'add blob'
427 427
428 428 $ cat > use_digests.py << EOF
429 429 > from mercurial import (
430 430 > exthelper,
431 431 > url,
432 432 > )
433 433 >
434 434 > eh = exthelper.exthelper()
435 435 > uisetup = eh.finaluisetup
436 436 >
437 437 > @eh.wrapfunction(url, 'opener')
438 438 > def urlopener(orig, *args, **kwargs):
439 439 > opener = orig(*args, **kwargs)
440 440 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
441 441 > return opener
442 442 > EOF
443 443
444 444 Test that Digest Auth fails gracefully before testing the successful Basic Auth
445 445
446 446 $ hg -R auth_clone push --config extensions.x=use_digests.py
447 447 pushing to http://localhost:$HGPORT1/
448 448 searching for changes
449 449 abort: LFS HTTP error: HTTP Error 401: the server must support Basic Authentication!
450 450 (api=http://localhost:$HGPORT1/.git/info/lfs/objects/batch, action=upload)
451 451 [255]
452 452
453 453 $ hg -R auth_clone --debug push | egrep '^[{}]| '
454 454 {
455 455 "objects": [
456 456 {
457 457 "actions": {
458 458 "upload": {
459 459 "expires_at": "$ISO_8601_DATE_TIME$"
460 460 "header": {
461 461 "Accept": "application/vnd.git-lfs"
462 462 "Authorization": "Basic dXNlcjpwYXNz"
463 463 }
464 464 "href": "http://localhost:$HGPORT1/.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
465 465 }
466 466 }
467 467 "oid": "df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3"
468 468 "size": 13
469 469 }
470 470 ]
471 471 "transfer": "basic"
472 472 }
473 473
474 474 $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
475 475
476 476 $ cat $TESTTMP/access.log $TESTTMP/errors.log
477 477 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
478 478 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
479 479 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
480 480 $LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=506bf3d83f78c54b89e81c6411adee19fdf02156+525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
481 481 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
482 482 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
483 483 $LOCALIP - - [$LOGDATE$] "GET /.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d HTTP/1.1" 200 - (glob)
484 484 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
485 485 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest (glob)
486 486 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 401 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
487 487 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
488 488 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
489 489 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
490 490 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
491 491 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
492 492 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 401 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
493 493 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
494 494 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
495 495 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest (glob)
496 496 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - x-hgtest-authtype:Digest (glob)
497 497 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 401 - (glob)
498 498 $LOCALIP - - [$LOGDATE$] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)
499 499 $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D525251863cad618e55d483555f3d00a2ca99597e+4d9397055dc0c205f3132f331f36353ab1a525a3 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
500 500 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
501 501 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
502 502 $LOCALIP - - [$LOGDATE$] "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
503 503 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
504 504 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 401 - (glob)
505 505 $LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 200 - (glob)
506 506 $LOCALIP - - [$LOGDATE$] "PUT /.hg/lfs/objects/df14287d8d75f076a6459e7a3703ca583ca9fb3f4918caed10c77ac8622d49b3 HTTP/1.1" 201 - (glob)
507 507 $LOCALIP - - [$LOGDATE$] "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
508 508 $LOCALIP - - [$LOGDATE$] "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
General Comments 0
You need to be logged in to leave comments. Login now