lfs: use %d to encode int, not str()...
Augie Fackler
r36621:dcb6fbaa default
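Note on the change: Mercurial's internals work with bytes, and this commit appears to be part of the Python 3 porting effort. On Python 3, str(len(text)) yields a unicode str that cannot be combined with the bytes of the serialized pointer, while '%d' % len(text) stays in the native string type on Python 2 and also works for bytes formatting (PEP 461) on Python 3.5+. A minimal sketch of the difference, separate from the commit itself:

    # Python 3.5+ behaviour (illustrative, not part of the diff)
    size = 42
    b'size %d\n' % size      # -> b'size 42\n': the value stays bytes
    b'size ' + str(size)     # TypeError: can't concat str to bytes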
@@ -1,391 +1,391 @@
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from mercurial.i18n import _
from mercurial.node import bin, nullid, short

from mercurial import (
    error,
    filelog,
    revlog,
    util,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

def supportedoutgoingversions(orig, repo):
    versions = orig(repo)
    if 'lfs' in repo.requirements:
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions

def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add('03')
    return versions

def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    # XXX: change to 'lfs=serve' when separate git server isn't required?
    caps.append('lfs')
    return caps

def bypasscheckhash(self, text):
    return False

def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        text = filelog.packmeta(hgmeta, text)

    return (text, True)

def writetostore(self, text):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
-    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
+    metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)

def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == nullid:
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)

def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    textlen = len(text)
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    if offset:
        textlen -= offset

    lfstrack = self.opener.options['lfstrack']

    if lfstrack(self.filename, textlen):
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)

def filelogrenamed(orig, self, node):
    if _islfs(self, node):
        rawtext = self.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)

def filelogsize(orig, self, rev):
    if _islfs(self, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self.revision(rev, raw=True)
        metadata = pointer.deserialize(rawtext)
        return int(metadata['size'])
    return orig(self, rev)

def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)

def filectxisbinary(orig, self):
    if self.islfs():
        # fast path: use lfs metadata to answer isbinary
        metadata = pointer.deserialize(self.rawdata())
        # if lfs metadata says nothing, assume it's binary by default
        return bool(int(metadata.get('x-is-binary', 1)))
    return orig(self)

def filectxislfs(self):
    return _islfs(self.filelog(), self.filenode())

def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    orig(fm, ctx, matcher, path, decode)
    fm.data(rawdata=ctx[path].rawdata())

def convertsink(orig, sink):
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        self.repo.vfs.append(
                            'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))

                return node

        sink.__class__ = lfssink

    return sink

def vfsinit(orig, self, othervfs):
    orig(self, othervfs)
    # copy lfs related options
    for k, v in othervfs.options.items():
        if k.startswith('lfs'):
            self.options[k] = v
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if util.safehasattr(othervfs, name):
            setattr(self, name, getattr(othervfs, name))

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the hgrc can't be updated.
        if not repo:
            return result

        # If lfs is required for this repo, permanently enable it locally
        if 'lfs' in repo.requirements:
            repo.vfs.append('hgrc',
                            util.tonativeeol('\n[extensions]\nlfs=\n'))

    return result

def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    # If lfs is required for this repo, permanently enable it locally
    if 'lfs' in destrepo.requirements:
        destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))

def _prefetchfiles(repo, ctx, files):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    pointers = []
    localstore = repo.svfs.lfslocalblobstore

    for f in files:
        p = pointerfromctx(ctx, f)
        if p and not localstore.has(p.oid()):
            p.filename = f
            pointers.append(p)

    if pointers:
        repo.svfs.lfsremoteblobstore.readbatch(pointers, localstore)

def _canskipupload(repo):
    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def candownload(repo):
    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)

def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions, e.g. infinitepush. Avoid renaming.
    '''
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)

def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)

def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed"""
    if 'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable('lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _("required features are not supported in the destination: %s")
            raise error.Abort(m % 'lfs',
                              hint=_('enable the lfs extension on the server'))
    return orig(repo, remote, *args, **kwargs)

def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)

def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    pointers = {}
    for r in revs:
        ctx = repo[r]
        for p in pointersfromctx(ctx).values():
            pointers[p.oid()] = p
    return sorted(pointers.values())

def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog(), fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                          % (f, short(_ctx.node()), ex))

def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    for f in ctx.files():
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result

def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)

def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srclfsvfs.walk():
        for oid in files:
            ui.write(_('copying lfs blob %s\n') % oid)
            lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))

def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if 'lfs' in repo.requirements:
        reqs.add('lfs')
    return reqs
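For context, the metadata built in writetostore() serializes to a git-lfs pointer file, per the public git-lfs spec, in which 'size' is a decimal byte count; this is the field the changed line formats. Roughly, with illustrative values (the oid and size here are made up):

    version https://git-lfs.github.com/spec/v1
    oid sha256:4d7a2146...e2393
    size 42
    x-is-binary 0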