##// END OF EJS Templates
lfs: add a progress bar when searching for blobs to upload...
Matt Harbison -
r39306:37e56607 default
parent child Browse files
Show More
@@ -1,421 +1,428 b''
1 # wrapper.py - methods wrapping core mercurial logic
1 # wrapper.py - methods wrapping core mercurial logic
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.node import bin, hex, nullid, short
13 from mercurial.node import bin, hex, nullid, short
14
14
15 from mercurial import (
15 from mercurial import (
16 error,
16 error,
17 revlog,
17 revlog,
18 util,
18 util,
19 )
19 )
20
20
21 from mercurial.utils import (
21 from mercurial.utils import (
22 stringutil,
22 stringutil,
23 )
23 )
24
24
25 from ..largefiles import lfutil
25 from ..largefiles import lfutil
26
26
27 from . import (
27 from . import (
28 blobstore,
28 blobstore,
29 pointer,
29 pointer,
30 )
30 )
31
31
def allsupportedversions(orig, ui):
    """Wrap to advertise changegroup version '03' in addition to the core set."""
    supported = orig(ui)
    supported.add('03')
    return supported
36
36
def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    # Start from whatever the wrapped implementation reports.
    result = orig(repo, proto)
    blobstoreready = util.safehasattr(repo.svfs, 'lfslocalblobstore')
    if blobstoreready:
        # XXX: change to 'lfs=serve' when separate git server isn't required?
        result.append('lfs')
    return result
44
44
def bypasscheckhash(self, text):
    """Flag-processor 'write' hash bypass: never skip the integrity check."""
    return False
47
47
def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    # 'text' here is the raw filelog data: a serialized LFS pointer.
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        # Blob is missing locally: fetch it on demand from the remote store.
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            # 'x-hg-*' keys carry hg filelog metadata (e.g. copy info)
            # round-tripped through the pointer; restore the bare name.
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith('\1\n'):
        # Re-wrap with the filelog metadata header so the revision text
        # matches what core hg expects (also escapes a literal '\1\n').
        text = revlog.packmeta(hgmeta, text)

    return (text, True)
75
75
def writetostore(self, text):
    """Write filelog content to the local blobstore, for flagprocessor.

    Stores the metadata-free content as an LFS blob keyed by its sha256 and
    returns a 2-tuple (rawtext, validatehash) where rawtext is the serialized
    LFS pointer to store in the filelog and validatehash is False.
    """
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = revlog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
105
105
def _islfs(rlog, node=None, rev=None):
    """Return True if the given revlog entry carries the LFS storage flag.

    Either *node* or *rev* identifies the entry; with neither supplied the
    content is treated as non-LFS.
    """
    if rev is not None:
        node = rlog.node(rev)
    elif node is not None:
        rev = rlog.rev(node)
    else:
        # both None - likely working copy content where node is not ready
        return False
    if node == nullid:
        return False
    return bool(rlog.flags(rev) & revlog.REVIDX_EXTSTORED)
118
118
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    """Wrap filelog.addrevision() to flag revisions that should live in LFS.

    Consults the configured 'lfstrack' matcher with the file name and its
    metadata-free size; on a match, the REVIDX_EXTSTORED flag is added
    before delegating to the wrapped implementation.
    """
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self.opener.options.get('lfstrack')

    if lfstrack:
        textlen = len(text)
        # exclude hg rename meta from file size
        meta, offset = revlog.parsemeta(text)
        if offset:
            textlen -= offset

        if lfstrack(self.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)
137
137
def filelogrenamed(orig, self, node):
    """Wrap filelog.renamed() to read rename info from LFS pointer metadata."""
    if not _islfs(self, node):
        return orig(self, node)
    rawtext = self.revision(node, raw=True)
    if not rawtext:
        return False
    metadata = pointer.deserialize(rawtext)
    # Copy source and revision are round-tripped through 'x-hg-*' keys.
    if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
        return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
    return False
149
149
def filelogsize(orig, self, rev):
    """Wrap filelog.size() to answer from LFS pointer metadata when possible."""
    if not _islfs(self, rev=rev):
        return orig(self, rev)
    # fast path: use lfs metadata to answer size
    metadata = pointer.deserialize(self.revision(rev, raw=True))
    return int(metadata['size'])
157
157
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    otherislfs = getattr(fctx, 'islfs', lambda: False)()
    if self.islfs() and otherislfs:
        # fast path: check LFS oid
        ours = pointer.deserialize(self.rawdata())
        theirs = pointer.deserialize(fctx.rawdata())
        return ours.oid() != theirs.oid()
    return orig(self, fctx)
167
167
def filectxisbinary(orig, self):
    """Wrap filectx.isbinary() to consult LFS pointer metadata first."""
    if not self.islfs():
        return orig(self)
    # fast path: use lfs metadata to answer isbinary
    metadata = pointer.deserialize(self.rawdata())
    # if lfs metadata says nothing, assume it's binary by default
    return bool(int(metadata.get('x-is-binary', 1)))
175
175
def filectxislfs(self):
    """Return True when this file context's revision is stored as LFS."""
    flog = self.filelog()
    return _islfs(flog, self.filenode())
178
178
179 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
179 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
180 orig(fm, ctx, matcher, path, decode)
180 orig(fm, ctx, matcher, path, decode)
181 fm.data(rawdata=ctx[path].rawdata())
181 fm.data(rawdata=ctx[path].rawdata())
182
182
def convertsink(orig, sink):
    """Wrap the convert extension's sink so the lfs requirement propagates.

    When converting into an hg repo, dynamically subclasses the sink so that
    as soon as a converted commit contains an lfs file, the 'lfs' requirement
    is recorded and the extension is enabled in the destination's hgrc.
    """
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                # Only do the (relatively expensive) per-file check until the
                # requirement has been recorded once.
                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        self.repo.vfs.append(
                            'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))

                return node

        # Swap the class in place so the already-constructed sink is upgraded.
        sink.__class__ = lfssink

    return sink
211
211
def vfsinit(orig, self, othervfs):
    """Wrap vfs initialization to propagate lfs options and blobstores."""
    orig(self, othervfs)
    # copy lfs related options
    for key, value in othervfs.options.items():
        if key.startswith('lfs'):
            self.options[key] = value
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for attr in ('lfslocalblobstore', 'lfsremoteblobstore'):
        if util.safehasattr(othervfs, attr):
            setattr(self, attr, getattr(othervfs, attr))
223
223
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone() to permanently enable lfs in repos that require it."""
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the hgrc can't be updated.
        if not repo:
            return result

        # If lfs is required for this repo, permanently enable it locally
        if 'lfs' in repo.requirements:
            repo.vfs.append('hgrc',
                            util.tonativeeol('\n[extensions]\nlfs=\n'))

    return result
242
242
def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Wrap hg.postshare() so shared lfs repos stay lfs-enabled."""
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    if 'lfs' not in destrepo.requirements:
        return

    # If lfs is required for this repo, permanently enable it locally
    destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
249
249
def _prefetchfiles(repo, revs, match):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    # Skip if this hasn't been passed to reposetup().
    if not util.safehasattr(repo.svfs, 'lfslocalblobstore'):
        return

    pointers = []
    oids = set()  # oids already queued, to avoid fetching a blob twice
    localstore = repo.svfs.lfslocalblobstore

    for rev in revs:
        ctx = repo[rev]
        for f in ctx.walk(match):
            p = pointerfromctx(ctx, f)
            # Queue only LFS files whose blob is neither queued nor local.
            if p and p.oid() not in oids and not localstore.has(p.oid()):
                p.filename = f
                pointers.append(p)
                oids.add(p.oid())

    if pointers:
        # Recalculating the repo store here allows 'paths.default' that is set
        # on the repo by a clone command to be used for the update.
        blobstore.remote(repo).readbatch(pointers, localstore)
273
273
def _canskipupload(repo):
    """Return True when uploading blobs to the remote store is a no-op."""
    # Skip if this hasn't been passed to reposetup()
    if util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        # if remotestore is a null store, upload is a no-op and can be skipped
        return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
    return True
281
281
def candownload(repo):
    """Return True when downloading from the remote store can do anything."""
    # Skip if this hasn't been passed to reposetup()
    if util.safehasattr(repo.svfs, 'lfsremoteblobstore'):
        # if remotestore is a null store, downloads will lead to nothing
        return not isinstance(repo.svfs.lfsremoteblobstore,
                              blobstore._nullremote)
    return False
289
289
def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    uploadblobs(repo, extractpointers(repo, revs))
299
299
def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    repo = pushop.repo
    missing = pushop.outgoing.missing
    return uploadblobsfromrevs(repo, missing)
308
308
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if 'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable('lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _("required features are not supported in the destination: %s")
            raise error.Abort(m % 'lfs',
                              hint=_('enable the lfs extension on the server'))

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            # Point the blobstore at the push destination for the duration of
            # the push, then restore the original store no matter what.
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)
334
334
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    # Make sure the remote store holds everything the bundle will refer to.
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs)
341
341
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    pointers = {}

    # One progress step per changeset scanned (the blob count isn't known
    # up front).
    progress = repo.ui.makeprogress(_('lfs search'), _('changesets'), len(revs))

    try:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                # Key by oid so each blob appears only once in the result.
                pointers[p.oid()] = p
            progress.increment()
        return sorted(pointers.values())
    finally:
        # Clear the progress bar even if pointer extraction raises.
        progress.complete()
def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        # The file was removed in ctx; look in a parent to see whether it
        # was an LFS file before the removal.
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog(), fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        # Removed file: signal "was LFS" with an empty dict (see docstring).
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                          % (f, short(_ctx.node()), ex))
381
388
def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    pointers = {}
    for path in ctx.files():
        ptr = pointerfromctx(ctx, path, removed=removed)
        if ptr is not None:
            pointers[path] = ptr
    return pointers
394
401
def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if pointers:
        localstore = repo.svfs.lfslocalblobstore
        repo.svfs.lfsremoteblobstore.writebatch(pointers, localstore)
402
409
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """Wrap 'hg debugupgraderepo' finalization to carry over lfs blobs."""
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if (util.safehasattr(srcrepo.svfs, 'lfslocalblobstore') and
        util.safehasattr(dstrepo.svfs, 'lfslocalblobstore')):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        # Walk every blob in the source store and link it into the new one.
        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_('copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
416
423
def upgraderequirements(orig, repo):
    """Wrap upgrade requirement computation to preserve the lfs requirement."""
    newreqs = orig(repo)
    if 'lfs' in repo.requirements:
        newreqs.add('lfs')
    return newreqs
General Comments 0
You need to be logged in to leave comments. Login now