##// END OF EJS Templates
lfs: allow to run 'debugupgraderepo' on repo with largefiles...
Boris Feld -
r35347:9eb19b13 default
parent child Browse files
Show More
@@ -1,191 +1,198
1 # lfs - hash-preserving large file support using Git-LFS protocol
1 # lfs - hash-preserving large file support using Git-LFS protocol
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """lfs - large file support (EXPERIMENTAL)
8 """lfs - large file support (EXPERIMENTAL)
9
9
10 Configs::
10 Configs::
11
11
12 [lfs]
12 [lfs]
13 # Remote endpoint. Multiple protocols are supported:
13 # Remote endpoint. Multiple protocols are supported:
14 # - http(s)://user:pass@example.com/path
14 # - http(s)://user:pass@example.com/path
15 # git-lfs endpoint
15 # git-lfs endpoint
16 # - file:///tmp/path
16 # - file:///tmp/path
17 # local filesystem, usually for testing
17 # local filesystem, usually for testing
18 # if unset, lfs will prompt setting this when it must use this value.
18 # if unset, lfs will prompt setting this when it must use this value.
19 # (default: unset)
19 # (default: unset)
20 url = https://example.com/lfs
20 url = https://example.com/lfs
21
21
22 # size of a file to make it use LFS
22 # size of a file to make it use LFS
23 threshold = 10M
23 threshold = 10M
24
24
25 # how many times to retry before giving up on transferring an object
25 # how many times to retry before giving up on transferring an object
26 retry = 5
26 retry = 5
27
27
28 # the local directory to store lfs files for sharing across local clones.
28 # the local directory to store lfs files for sharing across local clones.
29 # If not set, the cache is located in an OS specific cache location.
29 # If not set, the cache is located in an OS specific cache location.
30 usercache = /path/to/global/cache
30 usercache = /path/to/global/cache
31 """
31 """
32
32
33 from __future__ import absolute_import
33 from __future__ import absolute_import
34
34
35 from mercurial.i18n import _
35 from mercurial.i18n import _
36
36
37 from mercurial import (
37 from mercurial import (
38 bundle2,
38 bundle2,
39 changegroup,
39 changegroup,
40 context,
40 context,
41 exchange,
41 exchange,
42 extensions,
42 extensions,
43 filelog,
43 filelog,
44 hg,
44 hg,
45 localrepo,
45 localrepo,
46 registrar,
46 registrar,
47 revlog,
47 revlog,
48 scmutil,
48 scmutil,
49 upgrade,
49 vfs as vfsmod,
50 vfs as vfsmod,
50 )
51 )
51
52
52 from . import (
53 from . import (
53 blobstore,
54 blobstore,
54 wrapper,
55 wrapper,
55 )
56 )
56
57
57 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
58 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
58 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
59 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
59 # be specifying the version(s) of Mercurial they are tested with, or
60 # be specifying the version(s) of Mercurial they are tested with, or
60 # leave the attribute unspecified.
61 # leave the attribute unspecified.
61 testedwith = 'ships-with-hg-core'
62 testedwith = 'ships-with-hg-core'
62
63
# Registration tables for config options, commands, and template keywords
# exposed by this extension.
configtable = {}
configitem = registrar.configitem(configtable)

# Remote blobstore endpoint. Declared dynamicdefault because, when unset,
# lfs prompts for the value at the point it is actually needed.
configitem('lfs', 'url',
    default=configitem.dynamicdefault,
)
# Local directory for blobs shared across clones; OS-specific location if None.
configitem('lfs', 'usercache',
    default=None,
)
# File size above which a file is stored via LFS.
configitem('lfs', 'threshold',
    default=None,
)
# Number of retries before giving up on transferring an object.
configitem('lfs', 'retry',
    default=5,
)
# Deprecated
configitem('lfs', 'remotestore',
    default=None,
)
# Deprecated
configitem('lfs', 'dummy',
    default=None,
)
# Deprecated
configitem('lfs', 'git-lfs',
    default=None,
)

cmdtable = {}
command = registrar.command(cmdtable)

templatekeyword = registrar.templatekeyword()
95
96
def featuresetup(ui, supported):
    """Mark the 'lfs' requirement as understood.

    Mutates ``supported`` in place so repositories carrying the 'lfs'
    requirement can still be opened (instead of aborting on an unknown
    requirement).
    """
    supported.add('lfs')
99
100
def uisetup(ui):
    # Register featuresetup so every localrepo instantiated afterwards
    # accepts the 'lfs' requirement.
    localrepo.localrepository.featuresetupfuncs.add(featuresetup)
102
103
def reposetup(ui, repo):
    """Per-repository setup: install blob stores and the requirement hook."""
    # Nothing to do with a remote repo
    if not repo.local():
        return

    threshold = repo.ui.configbytes('lfs', 'threshold')

    # Expose the threshold and the blob stores through svfs so code that only
    # sees the store vfs (e.g. the filelog wrappers) can reach them.
    repo.svfs.options['lfsthreshold'] = threshold
    repo.svfs.lfslocalblobstore = blobstore.local(repo)
    repo.svfs.lfsremoteblobstore = blobstore.remote(repo)

    # Push hook
    repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)

    if 'lfs' not in repo.requirements:
        # Add the 'lfs' requirement as soon as a commit introduces an lfs
        # file, so other clients know they need this extension.
        def checkrequireslfs(ui, repo, **kwargs):
            if 'lfs' not in repo.requirements:
                ctx = repo[kwargs['node']]
                # TODO: is there a way to just walk the files in the commit?
                if any(ctx[f].islfs() for f in ctx.files()):
                    repo.requirements.add('lfs')
                    repo._writerequirements()

        ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
127
128
def wrapfilelog(filelog):
    """Install the lfs overrides on the given filelog class."""
    overrides = [
        ('addrevision', wrapper.filelogaddrevision),
        ('renamed', wrapper.filelogrenamed),
        ('size', wrapper.filelogsize),
    ]
    for method, override in overrides:
        extensions.wrapfunction(filelog, method, override)
134
135
def extsetup(ui):
    """One-time extension setup: wrap core Mercurial entry points."""
    wrapfilelog(filelog.filelog)

    wrapfunction = extensions.wrapfunction

    wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)

    # Keep the 'lfs' requirement across 'hg debugupgraderepo': it must be
    # both preserved and accepted as a destination requirement.
    wrapfunction(upgrade, 'preservedrequirements',
                 wrapper.upgraderequirements)

    wrapfunction(upgrade, 'supporteddestrequirements',
                 wrapper.upgraderequirements)

    # lfs needs changegroup3 (revlog flags are only carried by '03').
    wrapfunction(changegroup,
                 'supportedoutgoingversions',
                 wrapper.supportedoutgoingversions)
    wrapfunction(changegroup,
                 'allsupportedversions',
                 wrapper.allsupportedversions)

    wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
    wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
    context.basefilectx.islfs = wrapper.filectxislfs

    # Translate between lfs pointers (stored in the revlog) and real file
    # contents (stored in the blobstore) via the EXTSTORED revlog flag.
    revlog.addflagprocessor(
        revlog.REVIDX_EXTSTORED,
        (
            wrapper.readfromstore,
            wrapper.writetostore,
            wrapper.bypasscheckhash,
        ),
    )

    wrapfunction(hg, 'clone', wrapper.hgclone)
    wrapfunction(hg, 'postshare', wrapper.hgpostshare)

    # Make bundle choose changegroup3 instead of changegroup2. This affects
    # "hg bundle" command. Note: it does not cover all bundle formats like
    # "packed1". Using "packed1" with lfs will likely cause trouble.
    names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
    for k in names:
        exchange._bundlespeccgversions[k] = '03'

    # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
    # options and blob stores are passed from othervfs to the new readonlyvfs.
    wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)

    # when writing a bundle via "hg bundle" command, upload related LFS blobs
    wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
178
185
@templatekeyword('lfs_files')
def lfsfiles(repo, ctx, **args):
    """List of strings. LFS files added or modified by the changeset."""
    # wrapper.pointersfromctx returns {path: pointer}; only paths matter here.
    return sorted(wrapper.pointersfromctx(ctx))
184
191
@command('debuglfsupload',
         [('r', 'rev', [], _('upload large files introduced by REV'))])
def debuglfsupload(ui, repo, **opts):
    """upload lfs blobs added by the working copy parent or given revisions"""
    revspec = opts.get('rev', [])
    revisions = scmutil.revrange(repo, revspec)
    wrapper.uploadblobs(repo, wrapper.extractpointers(repo, revisions))
@@ -1,304 +1,310
1 # wrapper.py - methods wrapping core mercurial logic
1 # wrapper.py - methods wrapping core mercurial logic
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.node import bin, nullid, short
13 from mercurial.node import bin, nullid, short
14
14
15 from mercurial import (
15 from mercurial import (
16 error,
16 error,
17 filelog,
17 filelog,
18 revlog,
18 revlog,
19 util,
19 util,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 blobstore,
23 blobstore,
24 pointer,
24 pointer,
25 )
25 )
26
26
def supportedoutgoingversions(orig, repo):
    """Restrict outgoing changegroup versions to '03'.

    Older changegroup formats cannot carry the revlog flags this extension
    depends on, so they are dropped from whatever ``orig`` reports.
    """
    versions = orig(repo)
    for legacy in ('01', '02'):
        versions.discard(legacy)
    versions.add('03')
    return versions
33
33
def allsupportedversions(orig, ui):
    """Advertise changegroup version '03' on top of the defaults."""
    supported = orig(ui)
    supported.add('03')
    return supported
38
38
def bypasscheckhash(self, text):
    """Flag-processor raw transform: never skip hash validation for lfs."""
    return False
41
41
def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        # Blob not cached locally: fetch it from the remote store first.
        p.filename = getattr(self, 'indexfile', None)
        self.opener.lfsremoteblobstore.readbatch([p], store)
    text = store.read(oid)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith('x-hg-'):
            name = k[len('x-hg-'):]
            hgmeta[name] = p[k]
    # Re-add the filelog metadata header; also needed when the raw content
    # itself starts with the '\1\n' marker, to escape it.
    if hgmeta or text.startswith('\1\n'):
        text = filelog.packmeta(hgmeta, text)

    return (text, True)
67
67
def writetostore(self, text):
    """Flag-processor write transform: store content in the local blobstore.

    Returns a 2-tuple (rawtext, validatehash) where rawtext is the serialized
    lfs pointer that replaces the content in the revlog, and validatehash is
    False because the pointer does not hash to the original node.
    """
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
97
97
def _islfs(rlog, node=None, rev=None):
    """Return True if the given revlog entry carries the EXTSTORED flag.

    Either ``node`` or ``rev`` may be given; the other is resolved from the
    revlog. With both None the content is working-copy only and cannot be lfs.
    """
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == nullid:
        # null revision has no flags
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)
110
110
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    # Mark revisions larger than the configured threshold with the EXTSTORED
    # flag so the flag processor diverts their content to the blobstore.
    threshold = self.opener.options['lfsthreshold']
    textlen = len(text)
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    if offset:
        textlen -= offset

    if threshold and textlen > threshold:
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)
126
126
def filelogrenamed(orig, self, node):
    # For lfs revisions, answer the rename question from the pointer's
    # 'x-hg-copy'/'x-hg-copyrev' metadata instead of the (missing) filelog
    # text.
    if _islfs(self, node):
        rawtext = self.revision(node, raw=True)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
            return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
        else:
            return False
    return orig(self, node)
138
138
def filelogsize(orig, self, rev):
    """Answer size() from lfs pointer metadata when the revision is lfs."""
    if not _islfs(self, rev=rev):
        return orig(self, rev)
    # fast path: the pointer records the blob size, no need to fetch the blob
    pointermeta = pointer.deserialize(self.revision(rev, raw=True))
    return int(pointermeta['size'])
146
146
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)
156
156
def filectxisbinary(orig, self):
    """Answer isbinary() from lfs pointer metadata when possible."""
    if not self.islfs():
        return orig(self)
    # fast path: the pointer may record 'x-is-binary' for text blobs;
    # if lfs metadata says nothing, assume it's binary by default
    meta = pointer.deserialize(self.rawdata())
    return bool(int(meta.get('x-is-binary', 1)))
164
164
def filectxislfs(self):
    # Installed on context.basefilectx as ``islfs`` by extsetup.
    return _islfs(self.filelog(), self.filenode())
167
167
def convertsink(orig, sink):
    """Wrap the convert extension's hg sink so converted repos that gain lfs
    files also gain the 'lfs' requirement and local extension config."""
    sink = orig(sink)
    if sink.repotype == 'hg':
        class lfssink(sink.__class__):
            def putcommit(self, files, copies, parents, commit, source, revmap,
                          full, cleanp2):
                pc = super(lfssink, self).putcommit
                node = pc(files, copies, parents, commit, source, revmap, full,
                          cleanp2)

                if 'lfs' not in self.repo.requirements:
                    ctx = self.repo[node]

                    # The file list may contain removed files, so check for
                    # membership before assuming it is in the context.
                    if any(f in ctx and ctx[f].islfs() for f, n in files):
                        self.repo.requirements.add('lfs')
                        self.repo._writerequirements()

                        # Permanently enable lfs locally
                        with self.repo.vfs('hgrc', 'a', text=True) as fp:
                            fp.write('\n[extensions]\nlfs=\n')

                return node

        # Swap the class in place so the already-constructed sink picks up
        # the overridden putcommit.
        sink.__class__ = lfssink

    return sink
196
196
def vfsinit(orig, self, othervfs):
    """Propagate lfs options and blob stores into a wrapping (readonly) vfs."""
    orig(self, othervfs)
    # copy lfs related options
    self.options.update((opt, val) for opt, val in othervfs.options.items()
                        if opt.startswith('lfs'))
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for attr in ('lfslocalblobstore', 'lfsremoteblobstore'):
        if util.safehasattr(othervfs, attr):
            setattr(self, attr, getattr(othervfs, attr))
208
208
def hgclone(orig, ui, opts, *args, **kwargs):
    # After a clone, permanently enable the lfs extension in the destination's
    # hgrc when the new repo carries the 'lfs' requirement.
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the hgrc can't be updated.
        if not repo:
            return result

        # If lfs is required for this repo, permanently enable it locally
        if 'lfs' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlfs=\n')

    return result
227
227
def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """After 'hg share', enable lfs locally if the new repo requires it."""
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    if 'lfs' not in destrepo.requirements:
        return
    # Permanently enable the extension in the destination's hgrc.
    with destrepo.vfs('hgrc', 'a', text=True) as hgrcfile:
        hgrcfile.write('\n[extensions]\nlfs=\n')
235
235
def _canskipupload(repo):
    # if remotestore is a null store, upload is a no-op and can be skipped
    return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
239
239
def candownload(repo):
    # if remotestore is a null store, downloads will lead to nothing
    return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
243
243
def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if not _canskipupload(repo):
        uploadblobs(repo, extractpointers(repo, revs))
253
253
def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
262
262
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    # Upload first so the bundle's receivers can fetch the blobs remotely.
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing, *args,
                **kwargs)
269
269
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    ui = repo.ui
    if ui.debugflag:
        ui.write(_('lfs: computing set of blobs to upload\n'))
    # de-duplicate by oid so each blob is considered at most once
    byoid = {}
    for rev in revs:
        for ptr in pointersfromctx(repo[rev]).values():
            byoid[ptr.oid()] = ptr
    return byoid.values()
281
281
def pointersfromctx(ctx):
    """return a dict {path: pointer} for given single changectx"""
    result = {}
    for f in ctx.files():
        # ctx.files() may include removed files; skip those
        if f not in ctx:
            continue
        fctx = ctx[f]
        if not _islfs(fctx.filelog(), fctx.filenode()):
            continue
        try:
            result[f] = pointer.deserialize(fctx.rawdata())
        except pointer.InvalidPointer as ex:
            raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                              % (f, short(ctx.node()), ex))
    return result
297
297
def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    # hand the whole batch to the remote store in one call
    repo.svfs.lfsremoteblobstore.writebatch(pointers,
                                            repo.svfs.lfslocalblobstore)
305
def upgraderequirements(orig, repo):
    """Keep the 'lfs' requirement across 'hg debugupgraderepo'."""
    requirements = orig(repo)
    # an lfs repo must remain an lfs repo after the upgrade
    if 'lfs' in repo.requirements:
        requirements.add('lfs')
    return requirements
General Comments 0
You need to be logged in to leave comments. Login now