@@ -0,0 +1,132 | |||||
|
1 | # lfs - hash-preserving large file support using Git-LFS protocol | |||
|
2 | # | |||
|
3 | # Copyright 2017 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | """lfs - large file support (EXPERIMENTAL) | |||
|
9 | ||||
|
10 | Configs:: | |||
|
11 | ||||
|
12 | [lfs] | |||
|
13 | # Remote endpoint. Multiple protocols are supported: | |||
|
14 | # - http(s)://user:pass@example.com/path | |||
|
15 | # git-lfs endpoint | |||
|
16 | # - file:///tmp/path | |||
|
17 | # local filesystem, usually for testing | |||
|
18 | # if unset, lfs will prompt you to set this when it must use this value. | |||
|
19 | # (default: unset) | |||
|
20 | url = https://example.com/lfs | |||
|
21 | ||||
|
22 | # size threshold above which a file is stored as an LFS blob | |||
|
23 | threshold = 10M | |||
|
24 | ||||
|
25 | # how many times to retry before giving up on transferring an object | |||
|
26 | retry = 5 | |||
|
27 | """ | |||
|
28 | ||||
|
29 | from __future__ import absolute_import | |||
|
30 | ||||
|
31 | from mercurial import ( | |||
|
32 | bundle2, | |||
|
33 | changegroup, | |||
|
34 | context, | |||
|
35 | exchange, | |||
|
36 | extensions, | |||
|
37 | filelog, | |||
|
38 | registrar, | |||
|
39 | revlog, | |||
|
40 | scmutil, | |||
|
41 | vfs as vfsmod, | |||
|
42 | ) | |||
|
43 | from mercurial.i18n import _ | |||
|
44 | ||||
|
45 | from . import ( | |||
|
46 | blobstore, | |||
|
47 | wrapper, | |||
|
48 | ) | |||
|
49 | ||||
|
50 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |||
|
51 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |||
|
52 | # be specifying the version(s) of Mercurial they are tested with, or | |||
|
53 | # leave the attribute unspecified. | |||
|
54 | testedwith = 'ships-with-hg-core' | |||
|
55 | ||||
|
56 | cmdtable = {} | |||
|
57 | command = registrar.command(cmdtable) | |||
|
58 | ||||
|
59 | templatekeyword = registrar.templatekeyword() | |||
|
60 | ||||
|
61 | def reposetup(ui, repo): | |||
|
62 | # Nothing to do with a remote repo | |||
|
63 | if not repo.local(): | |||
|
64 | return | |||
|
65 | ||||
|
66 | threshold = repo.ui.configbytes('lfs', 'threshold', None) | |||
|
67 | ||||
|
68 | repo.svfs.options['lfsthreshold'] = threshold | |||
|
69 | repo.svfs.lfslocalblobstore = blobstore.local(repo) | |||
|
70 | repo.svfs.lfsremoteblobstore = blobstore.remote(repo) | |||
|
71 | ||||
|
72 | # Push hook | |||
|
73 | repo.prepushoutgoinghooks.add('lfs', wrapper.prepush) | |||
|
74 | ||||
|
75 | def wrapfilelog(filelog): | |||
|
76 | wrapfunction = extensions.wrapfunction | |||
|
77 | ||||
|
78 | wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision) | |||
|
79 | wrapfunction(filelog, 'renamed', wrapper.filelogrenamed) | |||
|
80 | wrapfunction(filelog, 'size', wrapper.filelogsize) | |||
|
81 | ||||
|
82 | def extsetup(ui): | |||
|
83 | wrapfilelog(filelog.filelog) | |||
|
84 | ||||
|
85 | wrapfunction = extensions.wrapfunction | |||
|
86 | wrapfunction(changegroup, | |||
|
87 | 'supportedoutgoingversions', | |||
|
88 | wrapper.supportedoutgoingversions) | |||
|
89 | wrapfunction(changegroup, | |||
|
90 | 'allsupportedversions', | |||
|
91 | wrapper.allsupportedversions) | |||
|
92 | ||||
|
93 | wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp) | |||
|
94 | wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary) | |||
|
95 | context.basefilectx.islfs = wrapper.filectxislfs | |||
|
96 | ||||
|
97 | revlog.addflagprocessor( | |||
|
98 | revlog.REVIDX_EXTSTORED, | |||
|
99 | ( | |||
|
100 | wrapper.readfromstore, | |||
|
101 | wrapper.writetostore, | |||
|
102 | wrapper.bypasscheckhash, | |||
|
103 | ), | |||
|
104 | ) | |||
|
105 | ||||
|
106 | # Make bundle choose changegroup3 instead of changegroup2. This affects the | |||
|
107 | # "hg bundle" command. Note: it does not cover all bundle formats like | |||
|
108 | # "packed1". Using "packed1" with lfs will likely cause trouble. | |||
|
109 | names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02'] | |||
|
110 | for k in names: | |||
|
111 | exchange._bundlespeccgversions[k] = '03' | |||
|
112 | ||||
|
113 | # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", so we need to make sure lfs | |||
|
114 | # options and blob stores are passed from othervfs to the new readonlyvfs. | |||
|
115 | wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit) | |||
|
116 | ||||
|
117 | # when writing a bundle via "hg bundle" command, upload related LFS blobs | |||
|
118 | wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle) | |||
|
119 | ||||
|
120 | @templatekeyword('lfs_files') | |||
|
121 | def lfsfiles(repo, ctx, **args): | |||
|
122 | """List of strings. LFS files added or modified by the changeset.""" | |||
|
123 | pointers = wrapper.pointersfromctx(ctx) # {path: pointer} | |||
|
124 | return sorted(pointers.keys()) | |||
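As a usage illustration, this is the keyword that test-lfs.t later exercises with:

    $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'

which prints, per changeset, the paths whose content is stored as LFS blobs.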
|
125 | ||||
|
126 | @command('debuglfsupload', | |||
|
127 | [('r', 'rev', [], _('upload large files introduced by REV'))]) | |||
|
128 | def debuglfsupload(ui, repo, **opts): | |||
|
129 | """upload lfs blobs added by the working copy parent or given revisions""" | |||
|
130 | revs = opts.get('rev', []) | |||
|
131 | pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs)) | |||
|
132 | wrapper.uploadblobs(repo, pointers) |
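For reference, test-lfs.t later drives this command as:

    $ hg debuglfsupload -r 'all()' -v

to upload every blob introduced by the given revisions to the configured remote store.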
@@ -0,0 +1,346 | |||||
|
1 | # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages | |||
|
2 | # | |||
|
3 | # Copyright 2017 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | import json | |||
|
11 | import os | |||
|
12 | import re | |||
|
13 | ||||
|
14 | from mercurial import ( | |||
|
15 | error, | |||
|
16 | url as urlmod, | |||
|
17 | util, | |||
|
18 | vfs as vfsmod, | |||
|
19 | ) | |||
|
20 | from mercurial.i18n import _ | |||
|
21 | ||||
|
22 | # 64 hex digits for a SHA-256 oid | |||
|
23 | _lfsre = re.compile(r'\A[a-f0-9]{64}\Z') | |||
|
24 | ||||
|
25 | class lfsvfs(vfsmod.vfs): | |||
|
26 | def join(self, path): | |||
|
27 | """split the path at first two characters, like: XX/XXXXX...""" | |||
|
28 | if not _lfsre.match(path): | |||
|
29 | raise error.ProgrammingError('unexpected lfs path: %s' % path) | |||
|
30 | return super(lfsvfs, self).join(path[0:2], path[2:]) | |||
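For concreteness (the same path shows up later in test-lfs.t), an oid such as
f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b is joined as

    f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b

so the first two hex digits become a directory fan-out under .hg/store/lfs/objects.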
|
31 | ||||
|
32 | class filewithprogress(object): | |||
|
33 | """a file-like object that supports __len__ and read. | |||
|
34 | ||||
|
35 | Useful for providing progress information about how many bytes have been read. | |||
|
36 | """ | |||
|
37 | ||||
|
38 | def __init__(self, fp, callback): | |||
|
39 | self._fp = fp | |||
|
40 | self._callback = callback # func(readsize) | |||
|
41 | fp.seek(0, os.SEEK_END) | |||
|
42 | self._len = fp.tell() | |||
|
43 | fp.seek(0) | |||
|
44 | ||||
|
45 | def __len__(self): | |||
|
46 | return self._len | |||
|
47 | ||||
|
48 | def read(self, size): | |||
|
49 | if self._fp is None: | |||
|
50 | return b'' | |||
|
51 | data = self._fp.read(size) | |||
|
52 | if data: | |||
|
53 | if self._callback: | |||
|
54 | self._callback(len(data)) | |||
|
55 | else: | |||
|
56 | self._fp.close() | |||
|
57 | self._fp = None | |||
|
58 | return data | |||
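A minimal sketch of how this wrapper behaves; the file name and chunk size here are
made up for illustration:

    from hgext.lfs.blobstore import filewithprogress

    def report(nbytes):
        # invoked with the size of each chunk as it is consumed
        print('read %d more bytes' % nbytes)

    fp = filewithprogress(open('some-blob', 'rb'), report)
    total = len(fp)          # size computed once in __init__ via seek/tell
    while fp.read(65536):    # returns b'' and closes the file at EOF
        pass

In this module the wrapper is handed to urllib as the request body for uploads, so
read() ends up being driven by the opener.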
|
59 | ||||
|
60 | class local(object): | |||
|
61 | """Local blobstore for large file contents. | |||
|
62 | ||||
|
63 | This blobstore is used both as a cache and as a staging area for large blobs | |||
|
64 | to be uploaded to the remote blobstore. | |||
|
65 | """ | |||
|
66 | ||||
|
67 | def __init__(self, repo): | |||
|
68 | fullpath = repo.svfs.join('lfs/objects') | |||
|
69 | self.vfs = lfsvfs(fullpath) | |||
|
70 | ||||
|
71 | def write(self, oid, data): | |||
|
72 | """Write blob to local blobstore.""" | |||
|
73 | with self.vfs(oid, 'wb', atomictemp=True) as fp: | |||
|
74 | fp.write(data) | |||
|
75 | ||||
|
76 | def read(self, oid): | |||
|
77 | """Read blob from local blobstore.""" | |||
|
78 | return self.vfs.read(oid) | |||
|
79 | ||||
|
80 | def has(self, oid): | |||
|
81 | """Returns True if the local blobstore contains the requested blob, | |||
|
82 | False otherwise.""" | |||
|
83 | return self.vfs.exists(oid) | |||
|
84 | ||||
|
85 | class _gitlfsremote(object): | |||
|
86 | ||||
|
87 | def __init__(self, repo, url): | |||
|
88 | ui = repo.ui | |||
|
89 | self.ui = ui | |||
|
90 | baseurl, authinfo = url.authinfo() | |||
|
91 | self.baseurl = baseurl.rstrip('/') | |||
|
92 | self.urlopener = urlmod.opener(ui, authinfo) | |||
|
93 | self.retry = ui.configint('lfs', 'retry', 5) | |||
|
94 | ||||
|
95 | def writebatch(self, pointers, fromstore): | |||
|
96 | """Batch upload from local to remote blobstore.""" | |||
|
97 | self._batch(pointers, fromstore, 'upload') | |||
|
98 | ||||
|
99 | def readbatch(self, pointers, tostore): | |||
|
100 | """Batch download from remote to local blostore.""" | |||
|
101 | self._batch(pointers, tostore, 'download') | |||
|
102 | ||||
|
103 | def _batchrequest(self, pointers, action): | |||
|
104 | """Get metadata about objects pointed by pointers for given action | |||
|
105 | ||||
|
106 | Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]} | |||
|
107 | See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md | |||
|
108 | """ | |||
|
109 | objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers] | |||
|
110 | requestdata = json.dumps({ | |||
|
111 | 'objects': objects, | |||
|
112 | 'operation': action, | |||
|
113 | }) | |||
|
114 | batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl, | |||
|
115 | data=requestdata) | |||
|
116 | batchreq.add_header('Accept', 'application/vnd.git-lfs+json') | |||
|
117 | batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json') | |||
|
118 | try: | |||
|
119 | rawjson = self.urlopener.open(batchreq).read() | |||
|
120 | except util.urlerr.httperror as ex: | |||
|
121 | raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)') | |||
|
122 | % (ex, action)) | |||
|
123 | try: | |||
|
124 | response = json.loads(rawjson) | |||
|
125 | except ValueError: | |||
|
126 | raise LfsRemoteError(_('LFS server returns invalid JSON: %s') | |||
|
127 | % rawjson) | |||
|
128 | return response | |||
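A sketch of the JSON exchanged here, following the batch API document linked above;
oids and URLs are abbreviated or made up. The request built by this method looks like

    {"operation": "download",
     "objects": [{"oid": "31cf46...8e5b", "size": 12}]}

and a successful response, which _extractobjects and _basictransfer then walk, looks like

    {"objects": [
        {"oid": "31cf46...8e5b", "size": 12,
         "actions": {"download": {
             "href": "https://example.com/lfs/objects/31cf46...8e5b",
             "header": {"Authorization": "Basic ..."}}}}]}

Objects that need no transfer come back without the requested action, and per-object
failures carry an "error" entry with a "code", which _checkforservererror turns into
LfsRemoteError.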
|
129 | ||||
|
130 | def _checkforservererror(self, pointers, responses): | |||
|
131 | """Scans errors from objects | |||
|
132 | ||||
|
133 | Raises LfsRemoteError if any object has an error""" | |||
|
134 | for response in responses: | |||
|
135 | error = response.get('error') | |||
|
136 | if error: | |||
|
137 | ptrmap = {p.oid(): p for p in pointers} | |||
|
138 | p = ptrmap.get(response['oid'], None) | |||
|
139 | if error['code'] == 404 and p: | |||
|
140 | filename = getattr(p, 'filename', 'unknown') | |||
|
141 | raise LfsRemoteError( | |||
|
142 | _(('LFS server error. Remote object ' | |||
|
143 | 'for file %s not found: %r')) % (filename, response)) | |||
|
144 | raise LfsRemoteError(_('LFS server error: %r') % response) | |||
|
145 | ||||
|
146 | def _extractobjects(self, response, pointers, action): | |||
|
147 | """extract objects from response of the batch API | |||
|
148 | ||||
|
149 | response: parsed JSON object returned by batch API | |||
|
150 | return response['objects'] filtered by action | |||
|
151 | raise if any object has an error | |||
|
152 | """ | |||
|
153 | # Scan errors from objects - fail early | |||
|
154 | objects = response.get('objects', []) | |||
|
155 | self._checkforservererror(pointers, objects) | |||
|
156 | ||||
|
157 | # Filter objects with given action. Practically, this skips uploading | |||
|
158 | # objects which already exist on the server. | |||
|
159 | filteredobjects = [o for o in objects if action in o.get('actions', [])] | |||
|
160 | # But for downloading, we want all objects. Therefore missing objects | |||
|
161 | # should be considered an error. | |||
|
162 | if action == 'download': | |||
|
163 | if len(filteredobjects) < len(objects): | |||
|
164 | missing = [o.get('oid', '?') | |||
|
165 | for o in objects | |||
|
166 | if action not in o.get('actions', [])] | |||
|
167 | raise LfsRemoteError( | |||
|
168 | _('LFS server claims required objects do not exist:\n%s') | |||
|
169 | % '\n'.join(missing)) | |||
|
170 | ||||
|
171 | return filteredobjects | |||
|
172 | ||||
|
173 | def _basictransfer(self, obj, action, localstore, progress=None): | |||
|
174 | """Download or upload a single object using basic transfer protocol | |||
|
175 | ||||
|
176 | obj: dict, an object description returned by batch API | |||
|
177 | action: string, one of ['upload', 'download'] | |||
|
178 | localstore: blobstore.local | |||
|
179 | ||||
|
180 | See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\ | |||
|
181 | basic-transfers.md | |||
|
182 | """ | |||
|
183 | oid = str(obj['oid']) | |||
|
184 | ||||
|
185 | href = str(obj['actions'][action].get('href')) | |||
|
186 | headers = obj['actions'][action].get('header', {}).items() | |||
|
187 | ||||
|
188 | request = util.urlreq.request(href) | |||
|
189 | if action == 'upload': | |||
|
190 | # If uploading blobs, read data from local blobstore. | |||
|
191 | request.data = filewithprogress(localstore.vfs(oid), progress) | |||
|
192 | request.get_method = lambda: 'PUT' | |||
|
193 | ||||
|
194 | for k, v in headers: | |||
|
195 | request.add_header(k, v) | |||
|
196 | ||||
|
197 | response = b'' | |||
|
198 | try: | |||
|
199 | req = self.urlopener.open(request) | |||
|
200 | while True: | |||
|
201 | data = req.read(1048576) | |||
|
202 | if not data: | |||
|
203 | break | |||
|
204 | if action == 'download' and progress: | |||
|
205 | progress(len(data)) | |||
|
206 | response += data | |||
|
207 | except util.urlerr.httperror as ex: | |||
|
208 | raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)') | |||
|
209 | % (ex, oid, action)) | |||
|
210 | ||||
|
211 | if action == 'download': | |||
|
212 | # If downloading blobs, store downloaded data to local blobstore | |||
|
213 | localstore.write(oid, response) | |||
|
214 | ||||
|
215 | def _batch(self, pointers, localstore, action): | |||
|
216 | if action not in ['upload', 'download']: | |||
|
217 | raise error.ProgrammingError('invalid Git-LFS action: %s' % action) | |||
|
218 | ||||
|
219 | response = self._batchrequest(pointers, action) | |||
|
220 | prunningsize = [0] | |||
|
221 | objects = self._extractobjects(response, pointers, action) | |||
|
222 | total = sum(x.get('size', 0) for x in objects) | |||
|
223 | topic = {'upload': _('lfs uploading'), | |||
|
224 | 'download': _('lfs downloading')}[action] | |||
|
225 | if self.ui.verbose and len(objects) > 1: | |||
|
226 | self.ui.write(_('lfs: need to transfer %d objects (%s)\n') | |||
|
227 | % (len(objects), util.bytecount(total))) | |||
|
228 | self.ui.progress(topic, 0, total=total) | |||
|
229 | def progress(size): | |||
|
230 | # advance progress bar by "size" bytes | |||
|
231 | prunningsize[0] += size | |||
|
232 | self.ui.progress(topic, prunningsize[0], total=total) | |||
|
233 | for obj in sorted(objects, key=lambda o: o.get('oid')): | |||
|
234 | objsize = obj.get('size', 0) | |||
|
235 | if self.ui.verbose: | |||
|
236 | if action == 'download': | |||
|
237 | msg = _('lfs: downloading %s (%s)\n') | |||
|
238 | elif action == 'upload': | |||
|
239 | msg = _('lfs: uploading %s (%s)\n') | |||
|
240 | self.ui.write(msg % (obj.get('oid'), util.bytecount(objsize))) | |||
|
241 | origrunningsize = prunningsize[0] | |||
|
242 | retry = self.retry | |||
|
243 | while True: | |||
|
244 | prunningsize[0] = origrunningsize | |||
|
245 | try: | |||
|
246 | self._basictransfer(obj, action, localstore, | |||
|
247 | progress=progress) | |||
|
248 | break | |||
|
249 | except Exception as ex: | |||
|
250 | if retry > 0: | |||
|
251 | if self.ui.verbose: | |||
|
252 | self.ui.write( | |||
|
253 | _('lfs: failed: %r (remaining retry %d)\n') | |||
|
254 | % (ex, retry)) | |||
|
255 | retry -= 1 | |||
|
256 | continue | |||
|
257 | raise | |||
|
258 | ||||
|
259 | self.ui.progress(topic, pos=None, total=total) | |||
|
260 | ||||
|
261 | def __del__(self): | |||
|
262 | # copied from mercurial/httppeer.py | |||
|
263 | urlopener = getattr(self, 'urlopener', None) | |||
|
264 | if urlopener: | |||
|
265 | for h in urlopener.handlers: | |||
|
266 | h.close() | |||
|
267 | getattr(h, "close_all", lambda : None)() | |||
|
268 | ||||
|
269 | class _dummyremote(object): | |||
|
270 | """Dummy store storing blobs to temp directory.""" | |||
|
271 | ||||
|
272 | def __init__(self, repo, url): | |||
|
273 | fullpath = repo.vfs.join('lfs', url.path) | |||
|
274 | self.vfs = lfsvfs(fullpath) | |||
|
275 | ||||
|
276 | def writebatch(self, pointers, fromstore): | |||
|
277 | for p in pointers: | |||
|
278 | content = fromstore.read(p.oid()) | |||
|
279 | with self.vfs(p.oid(), 'wb', atomictemp=True) as fp: | |||
|
280 | fp.write(content) | |||
|
281 | ||||
|
282 | def readbatch(self, pointers, tostore): | |||
|
283 | for p in pointers: | |||
|
284 | content = self.vfs.read(p.oid()) | |||
|
285 | tostore.write(p.oid(), content) | |||
|
286 | ||||
|
287 | class _nullremote(object): | |||
|
288 | """Null store storing blobs to /dev/null.""" | |||
|
289 | ||||
|
290 | def __init__(self, repo, url): | |||
|
291 | pass | |||
|
292 | ||||
|
293 | def writebatch(self, pointers, fromstore): | |||
|
294 | pass | |||
|
295 | ||||
|
296 | def readbatch(self, pointers, tostore): | |||
|
297 | pass | |||
|
298 | ||||
|
299 | class _promptremote(object): | |||
|
300 | """Prompt user to set lfs.url when accessed.""" | |||
|
301 | ||||
|
302 | def __init__(self, repo, url): | |||
|
303 | pass | |||
|
304 | ||||
|
305 | def writebatch(self, pointers, fromstore, ui=None): | |||
|
306 | self._prompt() | |||
|
307 | ||||
|
308 | def readbatch(self, pointers, tostore, ui=None): | |||
|
309 | self._prompt() | |||
|
310 | ||||
|
311 | def _prompt(self): | |||
|
312 | raise error.Abort(_('lfs.url needs to be configured')) | |||
|
313 | ||||
|
314 | _storemap = { | |||
|
315 | 'https': _gitlfsremote, | |||
|
316 | 'http': _gitlfsremote, | |||
|
317 | 'file': _dummyremote, | |||
|
318 | 'null': _nullremote, | |||
|
319 | None: _promptremote, | |||
|
320 | } | |||
|
321 | ||||
|
322 | def remote(repo): | |||
|
323 | """remotestore factory. return a store in _storemap depending on config""" | |||
|
324 | defaulturl = '' | |||
|
325 | ||||
|
326 | # convert deprecated configs to the new url. TODO: remove this if other | |||
|
327 | # places are migrated to the new url config. | |||
|
328 | # deprecated config: lfs.remotestore | |||
|
329 | deprecatedstore = repo.ui.config('lfs', 'remotestore') | |||
|
330 | if deprecatedstore == 'dummy': | |||
|
331 | # deprecated config: lfs.remotepath | |||
|
332 | defaulturl = 'file://' + repo.ui.config('lfs', 'remotepath') | |||
|
333 | elif deprecatedstore == 'git-lfs': | |||
|
334 | # deprecated config: lfs.remoteurl | |||
|
335 | defaulturl = repo.ui.config('lfs', 'remoteurl') | |||
|
336 | elif deprecatedstore == 'null': | |||
|
337 | defaulturl = 'null://' | |||
|
338 | ||||
|
339 | url = util.url(repo.ui.config('lfs', 'url', defaulturl)) | |||
|
340 | scheme = url.scheme | |||
|
341 | if scheme not in _storemap: | |||
|
342 | raise error.Abort(_('lfs: unknown url scheme: %s') % scheme) | |||
|
343 | return _storemap[scheme](repo, url) | |||
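Connecting this back to the lfs.url examples in the extension docstring (illustrative):

    lfs.url=https://user:pass@example.com/lfs  -> _gitlfsremote (real Git-LFS server)
    lfs.url=file:///tmp/path                   -> _dummyremote  (local directory, tests)
    lfs.url=null://                            -> _nullremote   (no-op in both directions)
    lfs.url unset                              -> _promptremote (aborts asking for lfs.url)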
|
344 | ||||
|
345 | class LfsRemoteError(error.RevlogError): | |||
|
346 | pass |
@@ -0,0 +1,72 | |||||
|
1 | # pointer.py - Git-LFS pointer serialization | |||
|
2 | # | |||
|
3 | # Copyright 2017 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | import re | |||
|
11 | ||||
|
12 | from mercurial import ( | |||
|
13 | error, | |||
|
14 | ) | |||
|
15 | from mercurial.i18n import _ | |||
|
16 | ||||
|
17 | class InvalidPointer(error.RevlogError): | |||
|
18 | pass | |||
|
19 | ||||
|
20 | class gitlfspointer(dict): | |||
|
21 | VERSION = 'https://git-lfs.github.com/spec/v1' | |||
|
22 | ||||
|
23 | def __init__(self, *args, **kwargs): | |||
|
24 | self['version'] = self.VERSION | |||
|
25 | super(gitlfspointer, self).__init__(*args, **kwargs) | |||
|
26 | ||||
|
27 | @classmethod | |||
|
28 | def deserialize(cls, text): | |||
|
29 | try: | |||
|
30 | return cls(l.split(' ', 1) for l in text.splitlines()).validate() | |||
|
31 | except ValueError: # l.split returns 1 item instead of 2 | |||
|
32 | raise InvalidPointer(_('cannot parse git-lfs text: %r') % text) | |||
|
33 | ||||
|
34 | def serialize(self): | |||
|
35 | sortkeyfunc = lambda x: (x[0] != 'version', x) | |||
|
36 | items = sorted(self.validate().iteritems(), key=sortkeyfunc) | |||
|
37 | return ''.join('%s %s\n' % (k, v) for k, v in items) | |||
|
38 | ||||
|
39 | def oid(self): | |||
|
40 | return self['oid'].split(':')[-1] | |||
|
41 | ||||
|
42 | def size(self): | |||
|
43 | return int(self['size']) | |||
|
44 | ||||
|
45 | # regular expressions used by _validate | |||
|
46 | # see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md | |||
|
47 | _keyre = re.compile(r'\A[a-z0-9.-]+\Z') | |||
|
48 | _valuere = re.compile(r'\A[^\n]*\Z') | |||
|
49 | _requiredre = { | |||
|
50 | 'size': re.compile(r'\A[0-9]+\Z'), | |||
|
51 | 'oid': re.compile(r'\Asha256:[0-9a-f]{64}\Z'), | |||
|
52 | 'version': re.compile(r'\A%s\Z' % re.escape(VERSION)), | |||
|
53 | } | |||
|
54 | ||||
|
55 | def validate(self): | |||
|
56 | """raise InvalidPointer on error. return self if there is no error""" | |||
|
57 | requiredcount = 0 | |||
|
58 | for k, v in self.iteritems(): | |||
|
59 | if k in self._requiredre: | |||
|
60 | if not self._requiredre[k].match(v): | |||
|
61 | raise InvalidPointer(_('unexpected value: %s=%r') % (k, v)) | |||
|
62 | requiredcount += 1 | |||
|
63 | elif not self._keyre.match(k): | |||
|
64 | raise InvalidPointer(_('unexpected key: %s') % k) | |||
|
65 | if not self._valuere.match(v): | |||
|
66 | raise InvalidPointer(_('unexpected value: %s=%r') % (k, v)) | |||
|
67 | if len(self._requiredre) != requiredcount: | |||
|
68 | miss = sorted(set(self._requiredre.keys()).difference(self.keys())) | |||
|
69 | raise InvalidPointer(_('missed keys: %s') % ', '.join(miss)) | |||
|
70 | return self | |||
|
71 | ||||
|
72 | deserialize = gitlfspointer.deserialize |
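A quick round trip through the class above, in the module's Python 2 style; the oid
value here is made up:

    from hgext.lfs import pointer

    text = ('version https://git-lfs.github.com/spec/v1\n'
            'oid sha256:' + 'a' * 64 + '\n'
            'size 12\n')
    p = pointer.deserialize(text)   # validates the required keys
    p.oid()                         # 64 hex chars, "sha256:" prefix stripped
    p.size()                        # 12
    p.serialize() == text           # True; "version" always sorts first

The error paths (missing keys, malformed oid/size, bad key names) are exercised by
test-lfs-pointer.py later in this series.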
@@ -0,0 +1,247 | |||||
|
1 | # wrapper.py - methods wrapping core mercurial logic | |||
|
2 | # | |||
|
3 | # Copyright 2017 Facebook, Inc. | |||
|
4 | # | |||
|
5 | # This software may be used and distributed according to the terms of the | |||
|
6 | # GNU General Public License version 2 or any later version. | |||
|
7 | ||||
|
8 | from __future__ import absolute_import | |||
|
9 | ||||
|
10 | import hashlib | |||
|
11 | ||||
|
12 | from mercurial import ( | |||
|
13 | error, | |||
|
14 | filelog, | |||
|
15 | revlog, | |||
|
16 | util, | |||
|
17 | ) | |||
|
18 | from mercurial.i18n import _ | |||
|
19 | from mercurial.node import bin, nullid, short | |||
|
20 | ||||
|
21 | from . import ( | |||
|
22 | blobstore, | |||
|
23 | pointer, | |||
|
24 | ) | |||
|
25 | ||||
|
26 | def supportedoutgoingversions(orig, repo): | |||
|
27 | versions = orig(repo) | |||
|
28 | versions.discard('01') | |||
|
29 | versions.discard('02') | |||
|
30 | versions.add('03') | |||
|
31 | return versions | |||
|
32 | ||||
|
33 | def allsupportedversions(orig, ui): | |||
|
34 | versions = orig(ui) | |||
|
35 | versions.add('03') | |||
|
36 | return versions | |||
|
37 | ||||
|
38 | def bypasscheckhash(self, text): | |||
|
39 | return False | |||
|
40 | ||||
|
41 | def readfromstore(self, text): | |||
|
42 | """Read filelog content from local blobstore transform for flagprocessor. | |||
|
43 | ||||
|
44 | Default transform for flagprocessor, returning contents from blobstore. | |||
|
45 | Returns a 2-tuple (text, validatehash) where validatehash is True, as the | |||
|
46 | contents of the blobstore should be checked using checkhash. | |||
|
47 | """ | |||
|
48 | p = pointer.deserialize(text) | |||
|
49 | oid = p.oid() | |||
|
50 | store = self.opener.lfslocalblobstore | |||
|
51 | if not store.has(oid): | |||
|
52 | p.filename = getattr(self, 'indexfile', None) | |||
|
53 | self.opener.lfsremoteblobstore.readbatch([p], store) | |||
|
54 | text = store.read(oid) | |||
|
55 | ||||
|
56 | # pack hg filelog metadata | |||
|
57 | hgmeta = {} | |||
|
58 | for k in p.keys(): | |||
|
59 | if k.startswith('x-hg-'): | |||
|
60 | name = k[len('x-hg-'):] | |||
|
61 | hgmeta[name] = p[k] | |||
|
62 | if hgmeta or text.startswith('\1\n'): | |||
|
63 | text = filelog.packmeta(hgmeta, text) | |||
|
64 | ||||
|
65 | return (text, True) | |||
|
66 | ||||
|
67 | def writetostore(self, text): | |||
|
68 | # hg filelog metadata (includes rename, etc) | |||
|
69 | hgmeta, offset = filelog.parsemeta(text) | |||
|
70 | if offset and offset > 0: | |||
|
71 | # lfs blob does not contain hg filelog metadata | |||
|
72 | text = text[offset:] | |||
|
73 | ||||
|
74 | # git-lfs only supports sha256 | |||
|
75 | oid = hashlib.sha256(text).hexdigest() | |||
|
76 | self.opener.lfslocalblobstore.write(oid, text) | |||
|
77 | ||||
|
78 | # replace contents with metadata | |||
|
79 | longoid = 'sha256:%s' % oid | |||
|
80 | metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text))) | |||
|
81 | ||||
|
82 | # by default, we expect the content to be binary. however, LFS could also | |||
|
83 | # be used for non-binary content. add a special entry for non-binary data. | |||
|
84 | # this will be used by filectx.isbinary(). | |||
|
85 | if not util.binary(text): | |||
|
86 | # this is not hg filelog metadata (which would affect the commit hash), so no "x-hg-" prefix | |||
|
87 | metadata['x-is-binary'] = '0' | |||
|
88 | ||||
|
89 | # translate hg filelog metadata to lfs metadata with "x-hg-" prefix | |||
|
90 | if hgmeta is not None: | |||
|
91 | for k, v in hgmeta.iteritems(): | |||
|
92 | metadata['x-hg-%s' % k] = v | |||
|
93 | ||||
|
94 | rawtext = metadata.serialize() | |||
|
95 | return (rawtext, False) | |||
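For concreteness, the resulting rawtext is a pointer such as the one test-lfs.t later
dumps with hg debugdata for a renamed revision:

    version https://git-lfs.github.com/spec/v1
    oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024
    size 29
    x-hg-copy a1
    x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4
    x-is-binary 0

while the actual 29 bytes of content live in the local blobstore keyed by the sha256 oid.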
|
96 | ||||
|
97 | def _islfs(rlog, node=None, rev=None): | |||
|
98 | if rev is None: | |||
|
99 | if node is None: | |||
|
100 | # both None - likely working copy content where node is not ready | |||
|
101 | return False | |||
|
102 | rev = rlog.rev(node) | |||
|
103 | else: | |||
|
104 | node = rlog.node(rev) | |||
|
105 | if node == nullid: | |||
|
106 | return False | |||
|
107 | flags = rlog.flags(rev) | |||
|
108 | return bool(flags & revlog.REVIDX_EXTSTORED) | |||
|
109 | ||||
|
110 | def filelogaddrevision(orig, self, text, transaction, link, p1, p2, | |||
|
111 | cachedelta=None, node=None, | |||
|
112 | flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds): | |||
|
113 | threshold = self.opener.options['lfsthreshold'] | |||
|
114 | textlen = len(text) | |||
|
115 | # exclude hg rename meta from file size | |||
|
116 | meta, offset = filelog.parsemeta(text) | |||
|
117 | if offset: | |||
|
118 | textlen -= offset | |||
|
119 | ||||
|
120 | if threshold and textlen > threshold: | |||
|
121 | flags |= revlog.REVIDX_EXTSTORED | |||
|
122 | ||||
|
123 | return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta, | |||
|
124 | node=node, flags=flags, **kwds) | |||
|
125 | ||||
|
126 | def filelogrenamed(orig, self, node): | |||
|
127 | if _islfs(self, node): | |||
|
128 | rawtext = self.revision(node, raw=True) | |||
|
129 | if not rawtext: | |||
|
130 | return False | |||
|
131 | metadata = pointer.deserialize(rawtext) | |||
|
132 | if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata: | |||
|
133 | return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev']) | |||
|
134 | else: | |||
|
135 | return False | |||
|
136 | return orig(self, node) | |||
|
137 | ||||
|
138 | def filelogsize(orig, self, rev): | |||
|
139 | if _islfs(self, rev=rev): | |||
|
140 | # fast path: use lfs metadata to answer size | |||
|
141 | rawtext = self.revision(rev, raw=True) | |||
|
142 | metadata = pointer.deserialize(rawtext) | |||
|
143 | return int(metadata['size']) | |||
|
144 | return orig(self, rev) | |||
|
145 | ||||
|
146 | def filectxcmp(orig, self, fctx): | |||
|
147 | """returns True if text is different than fctx""" | |||
|
148 | # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs | |||
|
149 | if self.islfs() and getattr(fctx, 'islfs', lambda: False)(): | |||
|
150 | # fast path: check LFS oid | |||
|
151 | p1 = pointer.deserialize(self.rawdata()) | |||
|
152 | p2 = pointer.deserialize(fctx.rawdata()) | |||
|
153 | return p1.oid() != p2.oid() | |||
|
154 | return orig(self, fctx) | |||
|
155 | ||||
|
156 | def filectxisbinary(orig, self): | |||
|
157 | if self.islfs(): | |||
|
158 | # fast path: use lfs metadata to answer isbinary | |||
|
159 | metadata = pointer.deserialize(self.rawdata()) | |||
|
160 | # if lfs metadata says nothing, assume it's binary by default | |||
|
161 | return bool(int(metadata.get('x-is-binary', 1))) | |||
|
162 | return orig(self) | |||
|
163 | ||||
|
164 | def filectxislfs(self): | |||
|
165 | return _islfs(self.filelog(), self.filenode()) | |||
|
166 | ||||
|
167 | def vfsinit(orig, self, othervfs): | |||
|
168 | orig(self, othervfs) | |||
|
169 | # copy lfs related options | |||
|
170 | for k, v in othervfs.options.items(): | |||
|
171 | if k.startswith('lfs'): | |||
|
172 | self.options[k] = v | |||
|
173 | # also copy lfs blobstores. note: this can run before reposetup, so lfs | |||
|
174 | # blobstore attributes are not always ready at this time. | |||
|
175 | for name in ['lfslocalblobstore', 'lfsremoteblobstore']: | |||
|
176 | if util.safehasattr(othervfs, name): | |||
|
177 | setattr(self, name, getattr(othervfs, name)) | |||
|
178 | ||||
|
179 | def _canskipupload(repo): | |||
|
180 | # if remotestore is a null store, upload is a no-op and can be skipped | |||
|
181 | return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) | |||
|
182 | ||||
|
183 | def candownload(repo): | |||
|
184 | # if remotestore is a null store, downloads will lead to nothing | |||
|
185 | return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) | |||
|
186 | ||||
|
187 | def uploadblobsfromrevs(repo, revs): | |||
|
188 | '''upload lfs blobs introduced by revs | |||
|
189 | ||||
|
190 | Note: also used by other extensions, e.g. infinitepush. Avoid renaming. | |||
|
191 | ''' | |||
|
192 | if _canskipupload(repo): | |||
|
193 | return | |||
|
194 | pointers = extractpointers(repo, revs) | |||
|
195 | uploadblobs(repo, pointers) | |||
|
196 | ||||
|
197 | def prepush(pushop): | |||
|
198 | """Prepush hook. | |||
|
199 | ||||
|
200 | Read through the revisions to push, looking for filelog entries that can be | |||
|
201 | deserialized into metadata so that we can block the push on their upload to | |||
|
202 | the remote blobstore. | |||
|
203 | """ | |||
|
204 | return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing) | |||
|
205 | ||||
|
206 | def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing, | |||
|
207 | *args, **kwargs): | |||
|
208 | """upload LFS blobs added by outgoing revisions on 'hg bundle'""" | |||
|
209 | uploadblobsfromrevs(repo, outgoing.missing) | |||
|
210 | return orig(ui, repo, source, filename, bundletype, outgoing, *args, | |||
|
211 | **kwargs) | |||
|
212 | ||||
|
213 | def extractpointers(repo, revs): | |||
|
214 | """return a list of lfs pointers added by given revs""" | |||
|
215 | ui = repo.ui | |||
|
216 | if ui.debugflag: | |||
|
217 | ui.write(_('lfs: computing set of blobs to upload\n')) | |||
|
218 | pointers = {} | |||
|
219 | for r in revs: | |||
|
220 | ctx = repo[r] | |||
|
221 | for p in pointersfromctx(ctx).values(): | |||
|
222 | pointers[p.oid()] = p | |||
|
223 | return pointers.values() | |||
|
224 | ||||
|
225 | def pointersfromctx(ctx): | |||
|
226 | """return a dict {path: pointer} for given single changectx""" | |||
|
227 | result = {} | |||
|
228 | for f in ctx.files(): | |||
|
229 | if f not in ctx: | |||
|
230 | continue | |||
|
231 | fctx = ctx[f] | |||
|
232 | if not _islfs(fctx.filelog(), fctx.filenode()): | |||
|
233 | continue | |||
|
234 | try: | |||
|
235 | result[f] = pointer.deserialize(fctx.rawdata()) | |||
|
236 | except pointer.InvalidPointer as ex: | |||
|
237 | raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n') | |||
|
238 | % (f, short(ctx.node()), ex)) | |||
|
239 | return result | |||
|
240 | ||||
|
241 | def uploadblobs(repo, pointers): | |||
|
242 | """upload given pointers from local blobstore""" | |||
|
243 | if not pointers: | |||
|
244 | return | |||
|
245 | ||||
|
246 | remoteblob = repo.svfs.lfsremoteblobstore | |||
|
247 | remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore) |
@@ -0,0 +1,41 | |||||
|
1 | from __future__ import absolute_import, print_function | |||
|
2 | ||||
|
3 | import os | |||
|
4 | import sys | |||
|
5 | ||||
|
6 | # make it runnable using python directly without run-tests.py | |||
|
7 | sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')] | |||
|
8 | ||||
|
9 | from hgext.lfs import pointer | |||
|
10 | ||||
|
11 | def tryparse(text): | |||
|
12 | r = {} | |||
|
13 | try: | |||
|
14 | r = pointer.deserialize(text) | |||
|
15 | print('ok') | |||
|
16 | except Exception as ex: | |||
|
17 | print(ex) | |||
|
18 | if r: | |||
|
19 | text2 = r.serialize() | |||
|
20 | if text2 != text: | |||
|
21 | print('reconstructed text differs') | |||
|
22 | return r | |||
|
23 | ||||
|
24 | t = ('version https://git-lfs.github.com/spec/v1\n' | |||
|
25 | 'oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1' | |||
|
26 | '258daaa5e2ca24d17e2393\n' | |||
|
27 | 'size 12345\n' | |||
|
28 | 'x-foo extra-information\n') | |||
|
29 | ||||
|
30 | tryparse('') | |||
|
31 | tryparse(t) | |||
|
32 | tryparse(t.replace('git-lfs', 'unknown')) | |||
|
33 | tryparse(t.replace('v1\n', 'v1\n\n')) | |||
|
34 | tryparse(t.replace('sha256', 'ahs256')) | |||
|
35 | tryparse(t.replace('sha256:', '')) | |||
|
36 | tryparse(t.replace('12345', '0x12345')) | |||
|
37 | tryparse(t.replace('extra-information', 'extra\0information')) | |||
|
38 | tryparse(t.replace('extra-information', 'extra\ninformation')) | |||
|
39 | tryparse(t.replace('x-foo', 'x_foo')) | |||
|
40 | tryparse(t.replace('oid', 'blobid')) | |||
|
41 | tryparse(t.replace('size', 'size-bytes').replace('oid', 'object-id')) |
@@ -0,0 +1,12 | |||||
|
1 | missed keys: oid, size | |||
|
2 | ok | |||
|
3 | unexpected value: version='https://unknown.github.com/spec/v1' | |||
|
4 | cannot parse git-lfs text: 'version https://git-lfs.github.com/spec/v1\n\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 12345\nx-foo extra-information\n' | |||
|
5 | unexpected value: oid='ahs256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393' | |||
|
6 | unexpected value: oid='4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393' | |||
|
7 | unexpected value: size='0x12345' | |||
|
8 | ok | |||
|
9 | cannot parse git-lfs text: 'version https://git-lfs.github.com/spec/v1\noid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e2393\nsize 12345\nx-foo extra\ninformation\n' | |||
|
10 | unexpected key: x_foo | |||
|
11 | missed keys: oid | |||
|
12 | missed keys: oid, size |
@@ -0,0 +1,108 | |||||
|
1 | Require lfs-test-server (https://github.com/git-lfs/lfs-test-server) | |||
|
2 | ||||
|
3 | $ hash lfs-test-server || { echo 'skipped: missing lfs-test-server'; exit 80; } | |||
|
4 | ||||
|
5 | $ LFS_LISTEN="tcp://:$HGPORT" | |||
|
6 | $ LFS_HOST="localhost:$HGPORT" | |||
|
7 | $ LFS_PUBLIC=1 | |||
|
8 | $ export LFS_LISTEN LFS_HOST LFS_PUBLIC | |||
|
9 | $ lfs-test-server &> lfs-server.log & | |||
|
10 | $ echo $! >> $DAEMON_PIDS | |||
|
11 | ||||
|
12 | $ cat >> $HGRCPATH <<EOF | |||
|
13 | > [extensions] | |||
|
14 | > lfs= | |||
|
15 | > [lfs] | |||
|
16 | > url=http://foo:bar@$LFS_HOST/ | |||
|
17 | > threshold=1 | |||
|
18 | > EOF | |||
|
19 | ||||
|
20 | $ hg init repo1 | |||
|
21 | $ cd repo1 | |||
|
22 | $ echo THIS-IS-LFS > a | |||
|
23 | $ hg commit -m a -A a | |||
|
24 | ||||
|
25 | $ hg init ../repo2 | |||
|
26 | $ hg push ../repo2 -v | |||
|
27 | pushing to ../repo2 | |||
|
28 | searching for changes | |||
|
29 | lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) | |||
|
30 | 1 changesets found | |||
|
31 | uncompressed size of bundle content: | |||
|
32 | * (changelog) (glob) | |||
|
33 | * (manifests) (glob) | |||
|
34 | * a (glob) | |||
|
35 | adding changesets | |||
|
36 | adding manifests | |||
|
37 | adding file changes | |||
|
38 | added 1 changesets with 1 changes to 1 files | |||
|
39 | ||||
|
40 | $ cd ../repo2 | |||
|
41 | $ hg update tip -v | |||
|
42 | resolving manifests | |||
|
43 | getting a | |||
|
44 | lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) | |||
|
45 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |||
|
46 | ||||
|
47 | When the server has some blobs already | |||
|
48 | ||||
|
49 | $ hg mv a b | |||
|
50 | $ echo ANOTHER-LARGE-FILE > c | |||
|
51 | $ echo ANOTHER-LARGE-FILE2 > d | |||
|
52 | $ hg commit -m b-and-c -A b c d | |||
|
53 | $ hg push ../repo1 -v | grep -v '^ ' | |||
|
54 | pushing to ../repo1 | |||
|
55 | searching for changes | |||
|
56 | lfs: need to transfer 2 objects (39 bytes) | |||
|
57 | lfs: uploading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes) | |||
|
58 | lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) | |||
|
59 | 1 changesets found | |||
|
60 | uncompressed size of bundle content: | |||
|
61 | adding changesets | |||
|
62 | adding manifests | |||
|
63 | adding file changes | |||
|
64 | added 1 changesets with 3 changes to 3 files | |||
|
65 | ||||
|
66 | $ hg --repo ../repo1 update tip -v | |||
|
67 | resolving manifests | |||
|
68 | getting b | |||
|
69 | getting c | |||
|
70 | lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) | |||
|
71 | getting d | |||
|
72 | lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes) | |||
|
73 | 3 files updated, 0 files merged, 0 files removed, 0 files unresolved | |||
|
74 | ||||
|
75 | Check error message when the remote is missing a blob: | |||
|
76 | ||||
|
77 | $ echo FFFFF > b | |||
|
78 | $ hg commit -m b -A b | |||
|
79 | $ echo FFFFF >> b | |||
|
80 | $ hg commit -m b b | |||
|
81 | $ rm -rf .hg/store/lfs | |||
|
82 | $ hg update -C '.^' | |||
|
83 | abort: LFS server claims required objects do not exist: | |||
|
84 | 8e6ea5f6c066b44a0efa43bcce86aea73f17e6e23f0663df0251e7524e140a13! | |||
|
85 | [255] | |||
|
86 | ||||
|
87 | Check error message when object does not exist: | |||
|
88 | ||||
|
89 | $ hg init test && cd test | |||
|
90 | $ echo "[extensions]" >> .hg/hgrc | |||
|
91 | $ echo "lfs=" >> .hg/hgrc | |||
|
92 | $ echo "[lfs]" >> .hg/hgrc | |||
|
93 | $ echo "threshold=1" >> .hg/hgrc | |||
|
94 | $ echo a > a | |||
|
95 | $ hg add a | |||
|
96 | $ hg commit -m 'test' | |||
|
97 | $ echo aaaaa > a | |||
|
98 | $ hg commit -m 'largefile' | |||
|
99 | $ hg debugdata .hg/store/data/a.i 1 # verify this is not the file content but includes "oid", the LFS "pointer". | |||
|
100 | version https://git-lfs.github.com/spec/v1 | |||
|
101 | oid sha256:bdc26931acfb734b142a8d675f205becf27560dc461f501822de13274fe6fc8a | |||
|
102 | size 6 | |||
|
103 | x-is-binary 0 | |||
|
104 | $ cd .. | |||
|
105 | $ hg --config 'lfs.url=https://dewey-lfs.vip.facebook.com/lfs' clone test test2 | |||
|
106 | updating to branch default | |||
|
107 | abort: LFS server error. Remote object for file data/a.i not found:(.*)! (re) | |||
|
108 | [255] |
@@ -0,0 +1,544 | |||||
|
1 | # Initial setup | |||
|
2 | ||||
|
3 | $ cat >> $HGRCPATH << EOF | |||
|
4 | > [extensions] | |||
|
5 | > lfs= | |||
|
6 | > [lfs] | |||
|
7 | > threshold=1000B | |||
|
8 | > EOF | |||
|
9 | ||||
|
10 | $ LONG=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC | |||
|
11 | ||||
|
12 | # Prepare server and enable extension | |||
|
13 | $ hg init server | |||
|
14 | $ hg clone -q server client | |||
|
15 | $ cd client | |||
|
16 | ||||
|
17 | # Commit small file | |||
|
18 | $ echo s > smallfile | |||
|
19 | $ hg commit -Aqm "add small file" | |||
|
20 | ||||
|
21 | # Commit large file | |||
|
22 | $ echo $LONG > largefile | |||
|
23 | $ hg commit --traceback -Aqm "add large file" | |||
|
24 | ||||
|
25 | # Ensure metadata is stored | |||
|
26 | $ hg debugdata largefile 0 | |||
|
27 | version https://git-lfs.github.com/spec/v1 | |||
|
28 | oid sha256:f11e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b | |||
|
29 | size 1501 | |||
|
30 | x-is-binary 0 | |||
|
31 | ||||
|
32 | # Check the blobstore is populated | |||
|
33 | $ find .hg/store/lfs/objects | sort | |||
|
34 | .hg/store/lfs/objects | |||
|
35 | .hg/store/lfs/objects/f1 | |||
|
36 | .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b | |||
|
37 | ||||
|
38 | # Check the blob stored contains the actual contents of the file | |||
|
39 | $ cat .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b | |||
|
40 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC | |||
|
41 | ||||
|
42 | # Push changes to the server | |||
|
43 | ||||
|
44 | $ hg push | |||
|
45 | pushing to $TESTTMP/server (glob) | |||
|
46 | searching for changes | |||
|
47 | abort: lfs.url needs to be configured | |||
|
48 | [255] | |||
|
49 | ||||
|
50 | $ cat >> $HGRCPATH << EOF | |||
|
51 | > [lfs] | |||
|
52 | > url=file:$TESTTMP/dummy-remote/ | |||
|
53 | > EOF | |||
|
54 | ||||
|
55 | $ hg push -v | egrep -v '^(uncompressed| )' | |||
|
56 | pushing to $TESTTMP/server (glob) | |||
|
57 | searching for changes | |||
|
58 | 2 changesets found | |||
|
59 | adding changesets | |||
|
60 | adding manifests | |||
|
61 | adding file changes | |||
|
62 | added 2 changesets with 2 changes to 2 files | |||
|
63 | ||||
|
64 | # Unknown URL scheme | |||
|
65 | ||||
|
66 | $ hg push --config lfs.url=ftp://foobar | |||
|
67 | abort: lfs: unknown url scheme: ftp | |||
|
68 | [255] | |||
|
69 | ||||
|
70 | $ cd ../ | |||
|
71 | ||||
|
72 | # Initialize new client (not cloning) and setup extension | |||
|
73 | $ hg init client2 | |||
|
74 | $ cd client2 | |||
|
75 | $ cat >> .hg/hgrc <<EOF | |||
|
76 | > [paths] | |||
|
77 | > default = $TESTTMP/server | |||
|
78 | > EOF | |||
|
79 | ||||
|
80 | # Pull from server | |||
|
81 | $ hg pull default | |||
|
82 | pulling from $TESTTMP/server (glob) | |||
|
83 | requesting all changes | |||
|
84 | adding changesets | |||
|
85 | adding manifests | |||
|
86 | adding file changes | |||
|
87 | added 2 changesets with 2 changes to 2 files | |||
|
88 | new changesets b29ba743f89d:00c137947d30 | |||
|
89 | (run 'hg update' to get a working copy) | |||
|
90 | ||||
|
91 | # Check the blobstore is not yet populated | |||
|
92 | $ [ -d .hg/store/lfs/objects ] | |||
|
93 | [1] | |||
|
94 | ||||
|
95 | # Update to the last revision containing the large file | |||
|
96 | $ hg update | |||
|
97 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |||
|
98 | ||||
|
99 | # Check the blobstore has been populated on update | |||
|
100 | $ find .hg/store/lfs/objects | sort | |||
|
101 | .hg/store/lfs/objects | |||
|
102 | .hg/store/lfs/objects/f1 | |||
|
103 | .hg/store/lfs/objects/f1/1e77c257047a398492d8d6cb9f6acf3aa7c4384bb23080b43546053e183e4b | |||
|
104 | ||||
|
105 | # Check the contents of the file are fetched from blobstore when requested | |||
|
106 | $ hg cat -r . largefile | |||
|
107 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC | |||
|
108 | ||||
|
109 | # Check the file has been copied in the working copy | |||
|
110 | $ cat largefile | |||
|
111 | AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC | |||
|
112 | ||||
|
113 | $ cd .. | |||
|
114 | ||||
|
115 | # Check rename, and switch between large and small files | |||
|
116 | ||||
|
117 | $ hg init repo3 | |||
|
118 | $ cd repo3 | |||
|
119 | $ cat >> .hg/hgrc << EOF | |||
|
120 | > [lfs] | |||
|
121 | > threshold=10B | |||
|
122 | > EOF | |||
|
123 | ||||
|
124 | $ echo LONGER-THAN-TEN-BYTES-WILL-TRIGGER-LFS > large | |||
|
125 | $ echo SHORTER > small | |||
|
126 | $ hg add . -q | |||
|
127 | $ hg commit -m 'commit with lfs content' | |||
|
128 | ||||
|
129 | $ hg mv large l | |||
|
130 | $ hg mv small s | |||
|
131 | $ hg commit -m 'renames' | |||
|
132 | ||||
|
133 | $ echo SHORT > l | |||
|
134 | $ echo BECOME-LARGER-FROM-SHORTER > s | |||
|
135 | $ hg commit -m 'large to small, small to large' | |||
|
136 | ||||
|
137 | $ echo 1 >> l | |||
|
138 | $ echo 2 >> s | |||
|
139 | $ hg commit -m 'random modifications' | |||
|
140 | ||||
|
141 | $ echo RESTORE-TO-BE-LARGE > l | |||
|
142 | $ echo SHORTER > s | |||
|
143 | $ hg commit -m 'switch large and small again' | |||
|
144 | ||||
|
145 | # Test lfs_files template | |||
|
146 | ||||
|
147 | $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n' | |||
|
148 | 0 large | |||
|
149 | 1 l | |||
|
150 | 2 s | |||
|
151 | 3 s | |||
|
152 | 4 l | |||
|
153 | ||||
|
154 | # Push and pull the above repo | |||
|
155 | ||||
|
156 | $ hg --cwd .. init repo4 | |||
|
157 | $ hg push ../repo4 | |||
|
158 | pushing to ../repo4 | |||
|
159 | searching for changes | |||
|
160 | adding changesets | |||
|
161 | adding manifests | |||
|
162 | adding file changes | |||
|
163 | added 5 changesets with 10 changes to 4 files | |||
|
164 | ||||
|
165 | $ hg --cwd .. init repo5 | |||
|
166 | $ hg --cwd ../repo5 pull ../repo3 | |||
|
167 | pulling from ../repo3 | |||
|
168 | requesting all changes | |||
|
169 | adding changesets | |||
|
170 | adding manifests | |||
|
171 | adding file changes | |||
|
172 | added 5 changesets with 10 changes to 4 files | |||
|
173 | new changesets fd47a419c4f7:5adf850972b9 | |||
|
174 | (run 'hg update' to get a working copy) | |||
|
175 | ||||
|
176 | $ cd .. | |||
|
177 | ||||
|
178 | # Test clone | |||
|
179 | ||||
|
180 | $ hg init repo6 | |||
|
181 | $ cd repo6 | |||
|
182 | $ cat >> .hg/hgrc << EOF | |||
|
183 | > [lfs] | |||
|
184 | > threshold=30B | |||
|
185 | > EOF | |||
|
186 | ||||
|
187 | $ echo LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES > large | |||
|
188 | $ echo SMALL > small | |||
|
189 | $ hg commit -Aqm 'create a lfs file' large small | |||
|
190 | $ hg debuglfsupload -r 'all()' -v | |||
|
191 | ||||
|
192 | $ cd .. | |||
|
193 | ||||
|
194 | $ hg clone repo6 repo7 | |||
|
195 | updating to branch default | |||
|
196 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |||
|
197 | $ cd repo7 | |||
|
198 | $ cat large | |||
|
199 | LARGE-BECAUSE-IT-IS-MORE-THAN-30-BYTES | |||
|
200 | $ cat small | |||
|
201 | SMALL | |||
|
202 | ||||
|
203 | $ cd .. | |||
|
204 | ||||
|
205 | # Test rename and status | |||
|
206 | ||||
|
207 | $ hg init repo8 | |||
|
208 | $ cd repo8 | |||
|
209 | $ cat >> .hg/hgrc << EOF | |||
|
210 | > [lfs] | |||
|
211 | > threshold=10B | |||
|
212 | > EOF | |||
|
213 | ||||
|
214 | $ echo THIS-IS-LFS-BECAUSE-10-BYTES > a1 | |||
|
215 | $ echo SMALL > a2 | |||
|
216 | $ hg commit -m a -A a1 a2 | |||
|
217 | $ hg status | |||
|
218 | $ hg mv a1 b1 | |||
|
219 | $ hg mv a2 a1 | |||
|
220 | $ hg mv b1 a2 | |||
|
221 | $ hg commit -m b | |||
|
222 | $ hg status | |||
|
223 | $ HEADER=$'\1\n' | |||
|
224 | $ printf '%sSTART-WITH-HG-FILELOG-METADATA' "$HEADER" > a2 | |||
|
225 | $ printf '%sMETA\n' "$HEADER" > a1 | |||
|
226 | $ hg commit -m meta | |||
|
227 | $ hg status | |||
|
228 | $ hg log -T '{rev}: {file_copies} | {file_dels} | {file_adds}\n' | |||
|
229 | 2: | | | |||
|
230 | 1: a1 (a2)a2 (a1) | | | |||
|
231 | 0: | | a1 a2 | |||
|
232 | ||||
|
233 | $ for n in a1 a2; do | |||
|
234 | > for r in 0 1 2; do | |||
|
235 | > printf '\n%s @ %s\n' $n $r | |||
|
236 | > hg debugdata $n $r | |||
|
237 | > done | |||
|
238 | > done | |||
|
239 | ||||
|
240 | a1 @ 0 | |||
|
241 | version https://git-lfs.github.com/spec/v1 | |||
|
242 | oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024 | |||
|
243 | size 29 | |||
|
244 | x-is-binary 0 | |||
|
245 | ||||
|
246 | a1 @ 1 | |||
|
247 | \x01 (esc) | |||
|
248 | copy: a2 | |||
|
249 | copyrev: 50470ad23cf937b1f4b9f80bfe54df38e65b50d9 | |||
|
250 | \x01 (esc) | |||
|
251 | SMALL | |||
|
252 | ||||
|
253 | a1 @ 2 | |||
|
254 | \x01 (esc) | |||
|
255 | \x01 (esc) | |||
|
256 | \x01 (esc) | |||
|
257 | META | |||
|
258 | ||||
|
259 | a2 @ 0 | |||
|
260 | SMALL | |||
|
261 | ||||
|
262 | a2 @ 1 | |||
|
263 | version https://git-lfs.github.com/spec/v1 | |||
|
264 | oid sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024 | |||
|
265 | size 29 | |||
|
266 | x-hg-copy a1 | |||
|
267 | x-hg-copyrev be23af27908a582af43e5cda209a5a9b319de8d4 | |||
|
268 | x-is-binary 0 | |||
|
269 | ||||
|
270 | a2 @ 2 | |||
|
271 | version https://git-lfs.github.com/spec/v1 | |||
|
272 | oid sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943 | |||
|
273 | size 32 | |||
|
274 | x-is-binary 0 | |||
|
275 | ||||
|
276 | # Verify commit hashes include rename metadata | |||
|
277 | ||||
|
278 | $ hg log -T '{rev}:{node|short} {desc}\n' | |||
|
279 | 2:0fae949de7fa meta | |||
|
280 | 1:9cd6bdffdac0 b | |||
|
281 | 0:7f96794915f7 a | |||
|
282 | ||||
|
283 | $ cd .. | |||
|
284 | ||||
|
285 | # Test bundle | |||
|
286 | ||||
|
287 | $ hg init repo9 | |||
|
288 | $ cd repo9 | |||
|
289 | $ cat >> .hg/hgrc << EOF | |||
|
290 | > [lfs] | |||
|
291 | > threshold=10B | |||
|
292 | > [diff] | |||
|
293 | > git=1 | |||
|
294 | > EOF | |||
|
295 | ||||
|
296 | $ for i in 0 single two three 4; do | |||
|
297 | > echo 'THIS-IS-LFS-'$i > a | |||
|
298 | > hg commit -m a-$i -A a | |||
|
299 | > done | |||
|
300 | ||||
|
301 | $ hg update 2 -q | |||
|
302 | $ echo 'THIS-IS-LFS-2-CHILD' > a | |||
|
303 | $ hg commit -m branching -q | |||
|
304 | ||||
|
305 | $ hg bundle --base 1 bundle.hg -v | |||
|
306 | 4 changesets found | |||
|
307 | uncompressed size of bundle content: | |||
|
308 | * (changelog) (glob) | |||
|
309 | * (manifests) (glob) | |||
|
310 | * a (glob) | |||
|
311 | $ hg --config extensions.strip= strip -r 2 --no-backup --force -q | |||
|
312 | $ hg -R bundle.hg log -p -T '{rev} {desc}\n' a | |||
|
313 | 5 branching | |||
|
314 | diff --git a/a b/a | |||
|
315 | --- a/a | |||
|
316 | +++ b/a | |||
|
317 | @@ -1,1 +1,1 @@ | |||
|
318 | -THIS-IS-LFS-two | |||
|
319 | +THIS-IS-LFS-2-CHILD | |||
|
320 | ||||
|
321 | 4 a-4 | |||
|
322 | diff --git a/a b/a | |||
|
323 | --- a/a | |||
|
324 | +++ b/a | |||
|
325 | @@ -1,1 +1,1 @@ | |||
|
326 | -THIS-IS-LFS-three | |||
|
327 | +THIS-IS-LFS-4 | |||
|
328 | ||||
|
329 | 3 a-three | |||
|
330 | diff --git a/a b/a | |||
|
331 | --- a/a | |||
|
332 | +++ b/a | |||
|
333 | @@ -1,1 +1,1 @@ | |||
|
334 | -THIS-IS-LFS-two | |||
|
335 | +THIS-IS-LFS-three | |||
|
336 | ||||
|
337 | 2 a-two | |||
|
338 | diff --git a/a b/a | |||
|
339 | --- a/a | |||
|
340 | +++ b/a | |||
|
341 | @@ -1,1 +1,1 @@ | |||
|
342 | -THIS-IS-LFS-single | |||
|
343 | +THIS-IS-LFS-two | |||
|
344 | ||||
|
345 | 1 a-single | |||
|
346 | diff --git a/a b/a | |||
|
347 | --- a/a | |||
|
348 | +++ b/a | |||
|
349 | @@ -1,1 +1,1 @@ | |||
|
350 | -THIS-IS-LFS-0 | |||
|
351 | +THIS-IS-LFS-single | |||
|
352 | ||||
|
353 | 0 a-0 | |||
|
354 | diff --git a/a b/a | |||
|
355 | new file mode 100644 | |||
|
356 | --- /dev/null | |||
|
357 | +++ b/a | |||
|
358 | @@ -0,0 +1,1 @@ | |||
|
359 | +THIS-IS-LFS-0 | |||
|
360 | ||||
|
361 | $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q | |||
|
362 | $ hg -R bundle-again.hg log -p -T '{rev} {desc}\n' a | |||
|
363 | 5 branching | |||
|
364 | diff --git a/a b/a | |||
|
365 | --- a/a | |||
|
366 | +++ b/a | |||
|
367 | @@ -1,1 +1,1 @@ | |||
|
368 | -THIS-IS-LFS-two | |||
|
369 | +THIS-IS-LFS-2-CHILD | |||
|
370 | ||||
|
371 | 4 a-4 | |||
|
372 | diff --git a/a b/a | |||
|
373 | --- a/a | |||
|
374 | +++ b/a | |||
|
375 | @@ -1,1 +1,1 @@ | |||
|
376 | -THIS-IS-LFS-three | |||
|
377 | +THIS-IS-LFS-4 | |||
|
378 | ||||
|
379 | 3 a-three | |||
|
380 | diff --git a/a b/a | |||
|
381 | --- a/a | |||
|
382 | +++ b/a | |||
|
383 | @@ -1,1 +1,1 @@ | |||
|
384 | -THIS-IS-LFS-two | |||
|
385 | +THIS-IS-LFS-three | |||
|
386 | ||||
|
387 | 2 a-two | |||
|
388 | diff --git a/a b/a | |||
|
389 | --- a/a | |||
|
390 | +++ b/a | |||
|
391 | @@ -1,1 +1,1 @@ | |||
|
392 | -THIS-IS-LFS-single | |||
|
393 | +THIS-IS-LFS-two | |||
|
394 | ||||
|
395 | 1 a-single | |||
|
396 | diff --git a/a b/a | |||
|
397 | --- a/a | |||
|
398 | +++ b/a | |||
|
399 | @@ -1,1 +1,1 @@ | |||
|
400 | -THIS-IS-LFS-0 | |||
|
401 | +THIS-IS-LFS-single | |||
|
402 | ||||
|
403 | 0 a-0 | |||
|
404 | diff --git a/a b/a | |||
|
405 | new file mode 100644 | |||
|
406 | --- /dev/null | |||
|
407 | +++ b/a | |||
|
408 | @@ -0,0 +1,1 @@ | |||
|
409 | +THIS-IS-LFS-0 | |||
|
410 | ||||
|
411 | $ cd .. | |||
|
412 | ||||
|
413 | # Test isbinary | |||
|
414 | ||||
|
415 | $ hg init repo10 | |||
|
416 | $ cd repo10 | |||
|
417 | $ cat >> .hg/hgrc << EOF | |||
|
418 | > [extensions] | |||
|
419 | > lfs= | |||
|
420 | > [lfs] | |||
|
421 | > threshold=1 | |||
|
422 | > EOF | |||
|
423 | $ $PYTHON <<'EOF' | |||
|
424 | > def write(path, content): | |||
|
425 | > with open(path, 'wb') as f: | |||
|
426 | > f.write(content) | |||
|
427 | > write('a', b'\0\0') | |||
|
428 | > write('b', b'\1\n') | |||
|
429 | > write('c', b'\1\n\0') | |||
|
430 | > write('d', b'xx') | |||
|
431 | > EOF | |||
|
432 | $ hg add a b c d | |||
|
433 | $ hg diff --stat | |||
|
434 | a | Bin | |||
|
435 | b | 1 + | |||
|
436 | c | Bin | |||
|
437 | d | 1 + | |||
|
438 | 4 files changed, 2 insertions(+), 0 deletions(-) | |||
|
439 | $ hg commit -m binarytest | |||
|
440 | $ cat > $TESTTMP/dumpbinary.py << EOF | |||
|
441 | > def reposetup(ui, repo): | |||
|
442 | > for n in 'abcd': | |||
|
443 | > ui.write(('%s: binary=%s\n') % (n, repo['.'][n].isbinary())) | |||
|
444 | > EOF | |||
|
445 | $ hg --config extensions.dumpbinary=$TESTTMP/dumpbinary.py id --trace | |||
|
446 | a: binary=True | |||
|
447 | b: binary=False | |||
|
448 | c: binary=True | |||
|
449 | d: binary=False | |||
|
450 | b55353847f02 tip | |||
|
451 | ||||
|
452 | $ cd .. | |||
|
453 | ||||
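The results above follow the usual NUL-byte heuristic for binary detection; with LFS in play, the wrapped isbinary() can presumably be answered from the pointer's x-is-binary field without fetching the blob. A standalone sketch of the heuristic the test exercises, for illustration only:

    # illustration: data containing a NUL byte is reported as binary
    def isbinary(data):
        return b'\0' in data

    assert isbinary(b'\0\0')      # a -> binary=True
    assert not isbinary(b'\1\n')  # b -> binary=False
    assert isbinary(b'\1\n\0')    # c -> binary=True
    assert not isbinary(b'xx')    # d -> binary=False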
|
454 | # Test fctx.cmp fastpath - diff without LFS blobs | |||
|
455 | ||||
|
456 | $ hg init repo11 | |||
|
457 | $ cd repo11 | |||
|
458 | $ cat >> .hg/hgrc <<EOF | |||
|
459 | > [lfs] | |||
|
460 | > threshold=1 | |||
|
461 | > EOF | |||
|
462 | $ for i in 1 2 3; do | |||
|
463 | > cp ../repo10/a a | |||
|
464 | > if [ $i = 3 ]; then | |||
|
465 | > # make a mode-only change (the file content stays identical) | |||
|
466 | > chmod +x a | |||
|
467 | > i=2 | |||
|
468 | > fi | |||
|
469 | > echo $i >> a | |||
|
470 | > hg commit -m $i -A a | |||
|
471 | > done | |||
|
472 | $ [ -d .hg/store/lfs/objects ] | |||
|
473 | ||||
|
474 | $ cd .. | |||
|
475 | ||||
|
476 | $ hg clone repo11 repo12 --noupdate | |||
|
477 | $ cd repo12 | |||
|
478 | $ hg log --removed -p a -T '{desc}\n' --config diff.nobinary=1 --git | |||
|
479 | 2 | |||
|
480 | diff --git a/a b/a | |||
|
481 | old mode 100644 | |||
|
482 | new mode 100755 | |||
|
483 | ||||
|
484 | 2 | |||
|
485 | diff --git a/a b/a | |||
|
486 | Binary file a has changed | |||
|
487 | ||||
|
488 | 1 | |||
|
489 | diff --git a/a b/a | |||
|
490 | new file mode 100644 | |||
|
491 | Binary file a has changed | |||
|
492 | ||||
|
493 | $ [ -d .hg/store/lfs/objects ] | |||
|
494 | [1] | |||
|
495 | ||||
|
496 | $ cd .. | |||
|
497 | ||||
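repo12 was cloned with --noupdate and the final check confirms .hg/store/lfs/objects was never created, so the diffs above had to be answered from pointer metadata alone. A hedged sketch of the idea behind the cmp fastpath (names are hypothetical, not the wrapper's implementation):

    # hypothetical sketch: when both sides are LFS pointers, comparing oids
    # decides "did the content change?" without fetching either blob
    def pointers_differ(ptr_a, ptr_b):
        return ptr_a.get('oid') != ptr_b.get('oid')

    old = {'oid': 'sha256:5bb8341bee63b3649f222b2215bde37322bea075a30575aa685d8f8d21c77024'}
    new = {'oid': 'sha256:876dadc86a8542f9798048f2c47f51dbf8e4359aed883e8ec80c5db825f0d943'}
    assert pointers_differ(old, new)
    assert not pointers_differ(old, dict(old))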
|
498 | # Verify the repos | |||
|
499 | ||||
|
500 | $ cat > $TESTTMP/dumpflog.py << EOF | |||
|
501 | > # print raw revision sizes, flags, and hashes for certain files | |||
|
502 | > import hashlib | |||
|
503 | > from mercurial import revlog | |||
|
504 | > from mercurial.node import short | |||
|
505 | > def hash(rawtext): | |||
|
506 | > h = hashlib.sha512() | |||
|
507 | > h.update(rawtext) | |||
|
508 | > return h.hexdigest()[:4] | |||
|
509 | > def reposetup(ui, repo): | |||
|
510 | > # these 2 files are interesting | |||
|
511 | > for name in ['l', 's']: | |||
|
512 | > fl = repo.file(name) | |||
|
513 | > if len(fl) == 0: | |||
|
514 | > continue | |||
|
515 | > sizes = [revlog.revlog.rawsize(fl, i) for i in fl] | |||
|
516 | > texts = [fl.revision(i, raw=True) for i in fl] | |||
|
517 | > flags = [fl.flags(i) for i in fl] | |||
|
518 | > hashes = [hash(t) for t in texts] | |||
|
519 | > print(' %s: rawsizes=%r flags=%r hashes=%r' | |||
|
520 | > % (name, sizes, flags, hashes)) | |||
|
521 | > EOF | |||
|
522 | ||||
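In the verification output below, a flags value of 8192 marks revisions whose raw text is an LFS pointer rather than the real file content; 8192 is 1 << 13, which is assumed here to be Mercurial's "externally stored" revlog flag bit. A tiny sketch of checking that bit (illustrative only):

    # illustrative: 8192 == 1 << 13 is assumed to be the "externally stored"
    # revlog flag that marks LFS pointer revisions in the output below
    REVIDX_EXTSTORED = 1 << 13

    def islfsrev(flags):
        return bool(flags & REVIDX_EXTSTORED)

    assert islfsrev(8192) and not islfsrev(0)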
|
523 | $ for i in client client2 server repo3 repo4 repo5 repo6 repo7 repo8 repo9 \ | |||
|
524 | > repo10; do | |||
|
525 | > echo 'repo:' $i | |||
|
526 | > hg --cwd $i verify --config extensions.dumpflog=$TESTTMP/dumpflog.py -q | |||
|
527 | > done | |||
|
528 | repo: client | |||
|
529 | repo: client2 | |||
|
530 | repo: server | |||
|
531 | repo: repo3 | |||
|
532 | l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d'] | |||
|
533 | s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b'] | |||
|
534 | repo: repo4 | |||
|
535 | l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d'] | |||
|
536 | s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b'] | |||
|
537 | repo: repo5 | |||
|
538 | l: rawsizes=[211, 6, 8, 141] flags=[8192, 0, 0, 8192] hashes=['d2b8', '948c', 'cc88', '724d'] | |||
|
539 | s: rawsizes=[74, 141, 141, 8] flags=[0, 8192, 8192, 0] hashes=['3c80', 'fce0', '874a', '826b'] | |||
|
540 | repo: repo6 | |||
|
541 | repo: repo7 | |||
|
542 | repo: repo8 | |||
|
543 | repo: repo9 | |||
|
544 | repo: repo10 |