@@ -1,218 +1,206 @@
 # lfs - hash-preserving large file support using Git-LFS protocol
 #
 # Copyright 2017 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 """lfs - large file support (EXPERIMENTAL)
 
 Configs::
 
     [lfs]
     # Remote endpoint. Multiple protocols are supported:
     # - http(s)://user:pass@example.com/path
     #   git-lfs endpoint
     # - file:///tmp/path
     #   local filesystem, usually for testing
     # if unset, lfs will prompt to set this when it must use this value.
     # (default: unset)
     url = https://example.com/lfs
 
     # size of a file to make it use LFS
     threshold = 10M
 
     # how many times to retry before giving up on transferring an object
     retry = 5
 
     # the local directory to store lfs files for sharing across local clones.
     # If not set, the cache is located in an OS specific cache location.
     usercache = /path/to/global/cache
 """
 
 from __future__ import absolute_import
 
 from mercurial.i18n import _
 
 from mercurial import (
     bundle2,
     changegroup,
     context,
     exchange,
     extensions,
     filelog,
     hg,
     localrepo,
     node,
     registrar,
     revlog,
     scmutil,
     upgrade,
     vfs as vfsmod,
     wireproto,
 )
 
 from . import (
     blobstore,
     wrapper,
 )
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
 configitem('experimental', 'lfs.user-agent',
     default=None,
 )
 
 configitem('lfs', 'url',
-    default=configitem.dynamicdefault,
+    default=None,
 )
 configitem('lfs', 'usercache',
     default=None,
 )
 configitem('lfs', 'threshold',
     default=None,
 )
 configitem('lfs', 'retry',
     default=5,
 )
-# Deprecated
-configitem('lfs', 'remotestore',
-    default=None,
-)
-# Deprecated
-configitem('lfs', 'dummy',
-    default=None,
-)
-# Deprecated
-configitem('lfs', 'git-lfs',
-    default=None,
-)
 
 cmdtable = {}
 command = registrar.command(cmdtable)
 
 templatekeyword = registrar.templatekeyword()
 
 def featuresetup(ui, supported):
     # don't die on seeing a repo with the lfs requirement
     supported |= {'lfs'}
 
 def uisetup(ui):
     localrepo.localrepository.featuresetupfuncs.add(featuresetup)
 
 def reposetup(ui, repo):
     # Nothing to do with a remote repo
     if not repo.local():
         return
 
     threshold = repo.ui.configbytes('lfs', 'threshold')
 
     repo.svfs.options['lfsthreshold'] = threshold
     repo.svfs.lfslocalblobstore = blobstore.local(repo)
     repo.svfs.lfsremoteblobstore = blobstore.remote(repo)
 
     # Push hook
     repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
 
     if 'lfs' not in repo.requirements:
         def checkrequireslfs(ui, repo, **kwargs):
             if 'lfs' not in repo.requirements:
                 last = kwargs.get('node_last')
                 _bin = node.bin
                 if last:
                     s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last))
                 else:
                     s = repo.set('%n', _bin(kwargs['node']))
                 for ctx in s:
                     # TODO: is there a way to just walk the files in the commit?
                     if any(ctx[f].islfs() for f in ctx.files() if f in ctx):
                         repo.requirements.add('lfs')
                         repo._writerequirements()
                         break
 
         ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
         ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs')
 
 def wrapfilelog(filelog):
     wrapfunction = extensions.wrapfunction
 
     wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision)
     wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
     wrapfunction(filelog, 'size', wrapper.filelogsize)
 
 def extsetup(ui):
     wrapfilelog(filelog.filelog)
 
     wrapfunction = extensions.wrapfunction
 
     wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
 
     wrapfunction(upgrade, '_finishdatamigration',
                  wrapper.upgradefinishdatamigration)
 
     wrapfunction(upgrade, 'preservedrequirements',
                  wrapper.upgraderequirements)
 
     wrapfunction(upgrade, 'supporteddestrequirements',
                  wrapper.upgraderequirements)
 
     wrapfunction(changegroup,
                  'supportedoutgoingversions',
                  wrapper.supportedoutgoingversions)
     wrapfunction(changegroup,
                  'allsupportedversions',
                  wrapper.allsupportedversions)
 
     wrapfunction(exchange, 'push', wrapper.push)
     wrapfunction(wireproto, '_capabilities', wrapper._capabilities)
 
     wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
     wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
     context.basefilectx.islfs = wrapper.filectxislfs
 
     revlog.addflagprocessor(
         revlog.REVIDX_EXTSTORED,
         (
             wrapper.readfromstore,
             wrapper.writetostore,
             wrapper.bypasscheckhash,
         ),
     )
 
     wrapfunction(hg, 'clone', wrapper.hgclone)
     wrapfunction(hg, 'postshare', wrapper.hgpostshare)
 
     # Make bundle choose changegroup3 instead of changegroup2. This affects
     # "hg bundle" command. Note: it does not cover all bundle formats like
     # "packed1". Using "packed1" with lfs will likely cause trouble.
     names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
     for k in names:
         exchange._bundlespeccgversions[k] = '03'
 
     # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
     # options and blob stores are passed from othervfs to the new readonlyvfs.
     wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)
 
     # when writing a bundle via "hg bundle" command, upload related LFS blobs
     wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
 
 @templatekeyword('lfs_files')
 def lfsfiles(repo, ctx, **args):
     """List of strings. LFS files added or modified by the changeset."""
     pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
     return sorted(pointers.keys())
 
 @command('debuglfsupload',
          [('r', 'rev', [], _('upload large files introduced by REV'))])
 def debuglfsupload(ui, repo, **opts):
     """upload lfs blobs added by the working copy parent or given revisions"""
     revs = opts.get('rev', [])
     pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
     wrapper.uploadblobs(repo, pointers)
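
The deprecated config items dropped above (lfs.remotestore, lfs.dummy,
lfs.git-lfs) were consumed only by the fallback logic that the blobstore.py
hunk below deletes from remote(). As a minimal sketch of what existing setups
need to change (the helper name and example path are illustrative, not part of
this change), the old keys map onto the consolidated lfs.url value like so,
mirroring the removed code:

def migrate_deprecated_config(remotestore, remotepath=None, remoteurl=None):
    """Translate the removed lfs.remotestore family to an lfs.url value."""
    if remotestore == 'dummy':
        # was lfs.remotepath; 'file://' + '/tmp/lfs' -> 'file:///tmp/lfs'
        return 'file://' + remotepath
    if remotestore == 'git-lfs':
        return remoteurl                # was lfs.remoteurl
    if remotestore == 'null':
        return 'null://'
    return None                         # unset: lfs.url must be set explicitly

With the new code, an unset lfs.url resolves to util.url('') whose scheme is
None; _storemap maps that to _promptremote, so any operation that needs the
remote store aborts with "lfs.url needs to be configured".
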
@@ -1,470 +1,455 @@
 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
 #
 # Copyright 2017 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import hashlib
 import json
 import os
 import re
 import socket
 
 from mercurial.i18n import _
 
 from mercurial import (
     error,
     pathutil,
     url as urlmod,
     util,
     vfs as vfsmod,
     worker,
 )
 
 from ..largefiles import lfutil
 
 # 64 bytes for SHA256
 _lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
 
 class lfsvfs(vfsmod.vfs):
     def join(self, path):
         """split the path at first two characters, like: XX/XXXXX..."""
         if not _lfsre.match(path):
             raise error.ProgrammingError('unexpected lfs path: %s' % path)
         return super(lfsvfs, self).join(path[0:2], path[2:])
 
     def walk(self, path=None, onerror=None):
         """Yield (dirpath, [], oids) tuple for blobs under path
 
         Oids only exist in the root of this vfs, so dirpath is always ''.
         """
         root = os.path.normpath(self.base)
         # when dirpath == root, dirpath[prefixlen:] becomes empty
         # because len(dirpath) < prefixlen.
         prefixlen = len(pathutil.normasprefix(root))
         oids = []
 
         for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
                                             onerror=onerror):
             dirpath = dirpath[prefixlen:]
 
             # Silently skip unexpected files and directories
             if len(dirpath) == 2:
                 oids.extend([dirpath + f for f in files
                              if _lfsre.match(dirpath + f)])
 
         yield ('', [], oids)
 
 class filewithprogress(object):
     """a file-like object that supports __len__ and read.
 
     Useful to provide progress information for how many bytes are read.
     """
 
     def __init__(self, fp, callback):
         self._fp = fp
         self._callback = callback # func(readsize)
         fp.seek(0, os.SEEK_END)
         self._len = fp.tell()
         fp.seek(0)
 
     def __len__(self):
         return self._len
 
     def read(self, size):
         if self._fp is None:
             return b''
         data = self._fp.read(size)
         if data:
             if self._callback:
                 self._callback(len(data))
         else:
             self._fp.close()
             self._fp = None
         return data
 
 class local(object):
     """Local blobstore for large file contents.
 
     This blobstore is used both as a cache and as a staging area for large blobs
     to be uploaded to the remote blobstore.
     """
 
     def __init__(self, repo):
         fullpath = repo.svfs.join('lfs/objects')
         self.vfs = lfsvfs(fullpath)
         usercache = lfutil._usercachedir(repo.ui, 'lfs')
         self.cachevfs = lfsvfs(usercache)
         self.ui = repo.ui
 
     def open(self, oid):
         """Open a read-only file descriptor to the named blob, in either the
         usercache or the local store."""
         # The usercache is the most likely place to hold the file. Commit will
         # write to both it and the local store, as will anything that downloads
         # the blobs. However, things like clone without an update won't
         # populate the local store. For an init + push of a local clone,
         # the usercache is the only place it _could_ be. If not present, the
         # missing file msg here will indicate the local repo, not the usercache.
         if self.cachevfs.exists(oid):
             return self.cachevfs(oid, 'rb')
 
         return self.vfs(oid, 'rb')
 
     def download(self, oid, src):
         """Read the blob from the remote source in chunks, verify the content,
         and write to this local blobstore."""
         sha256 = hashlib.sha256()
 
         with self.vfs(oid, 'wb', atomictemp=True) as fp:
             for chunk in util.filechunkiter(src, size=1048576):
                 fp.write(chunk)
                 sha256.update(chunk)
 
             realoid = sha256.hexdigest()
             if realoid != oid:
                 raise error.Abort(_('corrupt remote lfs object: %s') % oid)
 
         # XXX: should we verify the content of the cache, and hardlink back to
         # the local store on success, but truncate, write and link on failure?
         if not self.cachevfs.exists(oid):
             self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
             lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
 
     def write(self, oid, data):
         """Write blob to local blobstore.
 
         This should only be called from the filelog during a commit or similar.
         As such, there is no need to verify the data. Imports from a remote
         store must use ``download()`` instead."""
         with self.vfs(oid, 'wb', atomictemp=True) as fp:
             fp.write(data)
 
         # XXX: should we verify the content of the cache, and hardlink back to
         # the local store on success, but truncate, write and link on failure?
         if not self.cachevfs.exists(oid):
             self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
             lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
 
     def read(self, oid, verify=True):
         """Read blob from local blobstore."""
         if not self.vfs.exists(oid):
             blob = self._read(self.cachevfs, oid, verify)
 
             # Even if revlog will verify the content, it needs to be verified
             # now before making the hardlink to avoid propagating corrupt blobs.
             # Don't abort if corruption is detected, because `hg verify` will
             # give more useful info about the corruption; simply don't add the
             # hardlink.
             if verify or hashlib.sha256(blob).hexdigest() == oid:
                 self.ui.note(_('lfs: found %s in the usercache\n') % oid)
                 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
         else:
             self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
             blob = self._read(self.vfs, oid, verify)
         return blob
 
     def _read(self, vfs, oid, verify):
         """Read blob (after verifying) from the given store"""
         blob = vfs.read(oid)
         if verify:
             _verify(oid, blob)
         return blob
 
     def has(self, oid):
         """Returns True if the local blobstore contains the requested blob,
         False otherwise."""
         return self.cachevfs.exists(oid) or self.vfs.exists(oid)
 
 class _gitlfsremote(object):
 
     def __init__(self, repo, url):
         ui = repo.ui
         self.ui = ui
         baseurl, authinfo = url.authinfo()
         self.baseurl = baseurl.rstrip('/')
         useragent = repo.ui.config('experimental', 'lfs.user-agent')
         if not useragent:
             useragent = 'mercurial/%s git/2.15.1' % util.version()
         self.urlopener = urlmod.opener(ui, authinfo, useragent)
         self.retry = ui.configint('lfs', 'retry')
 
     def writebatch(self, pointers, fromstore):
         """Batch upload from local to remote blobstore."""
         self._batch(pointers, fromstore, 'upload')
 
     def readbatch(self, pointers, tostore):
200 | 200 | """Batch download from remote to local blostore.""" |
 
     def _batchrequest(self, pointers, action):
         """Get metadata about objects pointed by pointers for given action
 
         Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
         See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
         """
         objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
         requestdata = json.dumps({
             'objects': objects,
             'operation': action,
         })
         batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
                                        data=requestdata)
         batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
         batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
         try:
             rawjson = self.urlopener.open(batchreq).read()
         except util.urlerr.httperror as ex:
             raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
                                  % (ex, action))
         try:
             response = json.loads(rawjson)
         except ValueError:
             raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
                                  % rawjson)
         return response
 
     def _checkforservererror(self, pointers, responses):
         """Scans errors from objects
 
         Raises LfsRemoteError if any object has an error"""
         for response in responses:
             error = response.get('error')
             if error:
                 ptrmap = {p.oid(): p for p in pointers}
                 p = ptrmap.get(response['oid'], None)
                 if error['code'] == 404 and p:
                     filename = getattr(p, 'filename', 'unknown')
                     raise LfsRemoteError(
                         _(('LFS server error. Remote object '
                           'for "%s" not found: %r')) % (filename, response))
                 raise LfsRemoteError(_('LFS server error: %r') % response)
 
     def _extractobjects(self, response, pointers, action):
         """extract objects from response of the batch API
 
         response: parsed JSON object returned by batch API
         return response['objects'] filtered by action
         raise if any object has an error
         """
         # Scan errors from objects - fail early
         objects = response.get('objects', [])
         self._checkforservererror(pointers, objects)
 
         # Filter objects with given action. Practically, this skips uploading
         # objects which exist in the server.
         filteredobjects = [o for o in objects if action in o.get('actions', [])]
         # But for downloading, we want all objects. Therefore missing objects
         # should be considered an error.
         if action == 'download':
             if len(filteredobjects) < len(objects):
                 missing = [o.get('oid', '?')
                            for o in objects
                            if action not in o.get('actions', [])]
                 raise LfsRemoteError(
                     _('LFS server claims required objects do not exist:\n%s')
                     % '\n'.join(missing))
 
         return filteredobjects
 
     def _basictransfer(self, obj, action, localstore):
         """Download or upload a single object using basic transfer protocol
 
         obj: dict, an object description returned by batch API
         action: string, one of ['upload', 'download']
         localstore: blobstore.local
 
         See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
         basic-transfers.md
         """
         oid = str(obj['oid'])
 
         href = str(obj['actions'][action].get('href'))
         headers = obj['actions'][action].get('header', {}).items()
 
         request = util.urlreq.request(href)
         if action == 'upload':
             # If uploading blobs, read data from local blobstore.
             with localstore.open(oid) as fp:
                 _verifyfile(oid, fp)
             request.data = filewithprogress(localstore.open(oid), None)
             request.get_method = lambda: 'PUT'
 
         for k, v in headers:
             request.add_header(k, v)
 
         response = b''
         try:
             req = self.urlopener.open(request)
             if action == 'download':
                 # If downloading blobs, store downloaded data to local blobstore
                 localstore.download(oid, req)
             else:
                 while True:
                     data = req.read(1048576)
                     if not data:
                         break
                     response += data
                 if response:
                     self.ui.debug('lfs %s response: %s' % (action, response))
         except util.urlerr.httperror as ex:
             raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
                                  % (ex, oid, action))
 
     def _batch(self, pointers, localstore, action):
         if action not in ['upload', 'download']:
             raise error.ProgrammingError('invalid Git-LFS action: %s' % action)
 
         response = self._batchrequest(pointers, action)
         objects = self._extractobjects(response, pointers, action)
         total = sum(x.get('size', 0) for x in objects)
         sizes = {}
         for obj in objects:
             sizes[obj.get('oid')] = obj.get('size', 0)
         topic = {'upload': _('lfs uploading'),
                  'download': _('lfs downloading')}[action]
         if len(objects) > 1:
             self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
                          % (len(objects), util.bytecount(total)))
         self.ui.progress(topic, 0, total=total)
         def transfer(chunk):
             for obj in chunk:
                 objsize = obj.get('size', 0)
                 if self.ui.verbose:
                     if action == 'download':
                         msg = _('lfs: downloading %s (%s)\n')
                     elif action == 'upload':
                         msg = _('lfs: uploading %s (%s)\n')
                     self.ui.note(msg % (obj.get('oid'),
                                         util.bytecount(objsize)))
                 retry = self.retry
                 while True:
                     try:
                         self._basictransfer(obj, action, localstore)
                         yield 1, obj.get('oid')
                         break
                     except socket.error as ex:
                         if retry > 0:
                             self.ui.note(
                                 _('lfs: failed: %r (remaining retry %d)\n')
                                 % (ex, retry))
                             retry -= 1
                             continue
                         raise
 
         oids = worker.worker(self.ui, 0.1, transfer, (),
                              sorted(objects, key=lambda o: o.get('oid')))
         processed = 0
         for _one, oid in oids:
             processed += sizes[oid]
             self.ui.progress(topic, processed, total=total)
             self.ui.note(_('lfs: processed: %s\n') % oid)
         self.ui.progress(topic, pos=None, total=total)
 
     def __del__(self):
         # copied from mercurial/httppeer.py
         urlopener = getattr(self, 'urlopener', None)
         if urlopener:
             for h in urlopener.handlers:
                 h.close()
                 getattr(h, "close_all", lambda : None)()
 
 class _dummyremote(object):
     """Dummy store storing blobs to temp directory."""
 
     def __init__(self, repo, url):
         fullpath = repo.vfs.join('lfs', url.path)
         self.vfs = lfsvfs(fullpath)
 
     def writebatch(self, pointers, fromstore):
         for p in pointers:
             content = fromstore.read(p.oid(), verify=True)
             with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
                 fp.write(content)
 
     def readbatch(self, pointers, tostore):
         for p in pointers:
             with self.vfs(p.oid(), 'rb') as fp:
                 tostore.download(p.oid(), fp)
 
 class _nullremote(object):
     """Null store storing blobs to /dev/null."""
 
     def __init__(self, repo, url):
         pass
 
     def writebatch(self, pointers, fromstore):
         pass
 
     def readbatch(self, pointers, tostore):
         pass
 
 class _promptremote(object):
     """Prompt user to set lfs.url when accessed."""
 
     def __init__(self, repo, url):
         pass
 
     def writebatch(self, pointers, fromstore, ui=None):
         self._prompt()
 
     def readbatch(self, pointers, tostore, ui=None):
         self._prompt()
 
     def _prompt(self):
         raise error.Abort(_('lfs.url needs to be configured'))
 
 _storemap = {
     'https': _gitlfsremote,
     'http': _gitlfsremote,
     'file': _dummyremote,
     'null': _nullremote,
     None: _promptremote,
 }
 
 def _verify(oid, content):
     realoid = hashlib.sha256(content).hexdigest()
     if realoid != oid:
         raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                           hint=_('run hg verify'))
 
 def _verifyfile(oid, fp):
     sha256 = hashlib.sha256()
     while True:
         data = fp.read(1024 * 1024)
         if not data:
             break
         sha256.update(data)
     realoid = sha256.hexdigest()
     if realoid != oid:
         raise error.Abort(_('detected corrupt lfs object: %s') % oid,
                           hint=_('run hg verify'))
 
 def remote(repo):
     """remotestore factory. return a store in _storemap depending on config"""
-    defaulturl = ''
-
-    # convert deprecated configs to the new url. TODO: remove this if other
-    # places are migrated to the new url config.
-    # deprecated config: lfs.remotestore
-    deprecatedstore = repo.ui.config('lfs', 'remotestore')
-    if deprecatedstore == 'dummy':
-        # deprecated config: lfs.remotepath
-        defaulturl = 'file://' + repo.ui.config('lfs', 'remotepath')
-    elif deprecatedstore == 'git-lfs':
-        # deprecated config: lfs.remoteurl
-        defaulturl = repo.ui.config('lfs', 'remoteurl')
-    elif deprecatedstore == 'null':
-        defaulturl = 'null://'
-
-    url = util.url(repo.ui.config('lfs', 'url', defaulturl))
+    url = util.url(repo.ui.config('lfs', 'url') or '')
     scheme = url.scheme
     if scheme not in _storemap:
         raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
     return _storemap[scheme](repo, url)
 
 class LfsRemoteError(error.RevlogError):
     pass
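
For context on the protocol the code above speaks: _batchrequest() POSTs JSON
to '<lfs.url>/objects/batch' with application/vnd.git-lfs+json headers, and
_extractobjects()/_basictransfer() consume the reply, per the batch.md spec
linked in the docstrings. A minimal sketch of both payloads (the oid, size,
and href values here are made up for illustration):

request_body = {
    'operation': 'download',            # or 'upload'
    'objects': [
        # oid is the sha256 hex digest of the blob, 64 characters
        {'oid': 'a' * 64, 'size': 12},
    ],
}

# A successful reply carries per-object transfer instructions. An object
# carrying an 'error' member instead raises LfsRemoteError in
# _checkforservererror(); for downloads, an object with no matching action
# is reported as missing by _extractobjects().
response_body = {
    'objects': [
        {
            'oid': 'a' * 64,
            'size': 12,
            'actions': {
                'download': {
                    'href': 'https://example.com/lfs/objects/' + 'a' * 64,
                    'header': {'Authorization': 'Basic <token>'},
                },
            },
        },
    ],
}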