##// END OF EJS Templates
lfs: use %d to encode int, not str()...
Augie Fackler -
r36621:dcb6fbaa default
parent child Browse files
Show More
@@ -1,391 +1,391 b''
1 1 # wrapper.py - methods wrapping core mercurial logic
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import hashlib
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial.node import bin, nullid, short
14 14
15 15 from mercurial import (
16 16 error,
17 17 filelog,
18 18 revlog,
19 19 util,
20 20 )
21 21
22 22 from ..largefiles import lfutil
23 23
24 24 from . import (
25 25 blobstore,
26 26 pointer,
27 27 )
28 28
def supportedoutgoingversions(orig, repo):
    """Advertise only changegroup version '03' (dropping '01'/'02') when
    the repo has the lfs requirement; otherwise defer to the wrapped fn."""
    versions = orig(repo)
    if 'lfs' not in repo.requirements:
        return versions
    versions.difference_update(('01', '02'))
    versions.add('03')
    return versions
36 36
def allsupportedversions(orig, ui):
    """Extend the wrapped set of supported changegroup versions with '03'."""
    supported = orig(ui)
    supported.add('03')
    return supported
41 41
42 42 def _capabilities(orig, repo, proto):
43 43 '''Wrap server command to announce lfs server capability'''
44 44 caps = orig(repo, proto)
45 45 # XXX: change to 'lfs=serve' when separate git server isn't required?
46 46 caps.append('lfs')
47 47 return caps
48 48
def bypasscheckhash(self, text):
    """Flag-processor validate hook: never validate here; the hash of the
    restored content is checked separately by the read path."""
    return False
51 51
def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default tranform for flagprocessor, returning contents from blobstore.
    Returns a 2-typle (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    ptr = pointer.deserialize(text)
    oid = ptr.oid()
    localstore = self.opener.lfslocalblobstore
    if not localstore.has(oid):
        # blob is missing locally: fetch it from the remote store
        ptr.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([ptr], localstore)

    # The caller will validate the content
    text = localstore.read(oid, verify=False)

    # pack hg filelog metadata (rename info etc.) back into the payload
    prefix = 'x-hg-'
    hgmeta = dict((k[len(prefix):], ptr[k]) for k in ptr.keys()
                  if k.startswith(prefix))
    if hgmeta or text.startswith('\1\n'):
        text = filelog.packmeta(hgmeta, text)

    return (text, True)
79 79
def writetostore(self, text):
    """Store ``text`` as an lfs blob and return its serialized pointer.

    Returns a 2-tuple (rawtext, validatehash) where rawtext is the lfs
    pointer that replaces the real content in the filelog, and validatehash
    is False because the pointer does not hash to the content it names.
    """
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata.  Use %d (not str()) so the size is
    # always encoded as bytes, including on Python 3.
    longoid = 'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata['x-is-binary'] = '0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.iteritems():
            metadata['x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
109 109
def _islfs(rlog, node=None, rev=None):
    """Return True if the revlog entry carries the lfs EXTSTORED flag.

    Exactly one of ``node``/``rev`` identifies the revision; with neither,
    assume working-copy content (no node yet) and report False.
    """
    if rev is not None:
        node = rlog.node(rev)
    elif node is not None:
        rev = rlog.rev(node)
    else:
        # both None - likely working copy content where node is not ready
        return False
    if node == nullid:
        return False
    return bool(rlog.flags(rev) & revlog.REVIDX_EXTSTORED)
122 122
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
                       cachedelta=None, node=None,
                       flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
    """Set REVIDX_EXTSTORED on revisions whose (rename-meta-stripped) size
    matches the configured lfs tracking policy, then delegate."""
    # exclude hg rename meta from file size
    meta, offset = filelog.parsemeta(text)
    size = len(text) - offset if offset else len(text)

    lfstrack = self.opener.options['lfstrack']
    if lfstrack(self.filename, size):
        flags |= revlog.REVIDX_EXTSTORED

    return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
                node=node, flags=flags, **kwds)
139 139
def filelogrenamed(orig, self, node):
    """For lfs revisions, answer the rename question from pointer metadata
    instead of parsing the real filelog payload."""
    if not _islfs(self, node):
        return orig(self, node)
    rawtext = self.revision(node, raw=True)
    if not rawtext:
        return False
    metadata = pointer.deserialize(rawtext)
    if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
        return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
    return False
151 151
def filelogsize(orig, self, rev):
    """For lfs revisions, answer the size from pointer metadata (fast path)
    instead of reconstructing the blob."""
    if not _islfs(self, rev=rev):
        return orig(self, rev)
    metadata = pointer.deserialize(self.revision(rev, raw=True))
    return int(metadata['size'])
159 159
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
    otherislfs = getattr(fctx, 'islfs', lambda: False)
    if self.islfs() and otherislfs():
        # fast path: two lfs files differ iff their oids differ
        mine = pointer.deserialize(self.rawdata())
        theirs = pointer.deserialize(fctx.rawdata())
        return mine.oid() != theirs.oid()
    return orig(self, fctx)
169 169
def filectxisbinary(orig, self):
    """For lfs files, answer isbinary from pointer metadata (fast path)."""
    if not self.islfs():
        return orig(self)
    metadata = pointer.deserialize(self.rawdata())
    # if lfs metadata says nothing, assume it's binary by default
    return bool(int(metadata.get('x-is-binary', 1)))
177 177
def filectxislfs(self):
    """True if this file revision is stored externally as an lfs blob."""
    flog = self.filelog()
    return _islfs(flog, self.filenode())
180 180
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
    """Run the wrapped cat formatter, then also expose the raw (pointer)
    data for the file under the ``rawdata`` key."""
    orig(fm, ctx, matcher, path, decode)
    raw = ctx[path].rawdata()
    fm.data(rawdata=raw)
184 184
def convertsink(orig, sink):
    """Wrap convert's sink so that an hg destination picks up the 'lfs'
    requirement (and enables the extension locally) the first time a
    converted commit introduces an lfs file."""
    sink = orig(sink)
    if sink.repotype != 'hg':
        return sink

    class lfssink(sink.__class__):
        def putcommit(self, files, copies, parents, commit, source, revmap,
                      full, cleanp2):
            node = super(lfssink, self).putcommit(
                files, copies, parents, commit, source, revmap, full, cleanp2)

            if 'lfs' not in self.repo.requirements:
                ctx = self.repo[node]

                # The file list may contain removed files, so check for
                # membership before assuming it is in the context.
                if any(f in ctx and ctx[f].islfs() for f, n in files):
                    self.repo.requirements.add('lfs')
                    self.repo._writerequirements()

                    # Permanently enable lfs locally
                    self.repo.vfs.append(
                        'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))

            return node

    sink.__class__ = lfssink
    return sink
213 213
def vfsinit(orig, self, othervfs):
    """Propagate lfs options and blobstores from ``othervfs`` onto ``self``."""
    orig(self, othervfs)
    # copy lfs related options
    for key, value in othervfs.options.items():
        if key.startswith('lfs'):
            self.options[key] = value
    # also copy lfs blobstores. note: this can run before reposetup, so lfs
    # blobstore attributes are not always ready at this time.
    for attr in ('lfslocalblobstore', 'lfsremoteblobstore'):
        if util.safehasattr(othervfs, attr):
            setattr(self, attr, getattr(othervfs, attr))
225 225
def hgclone(orig, ui, opts, *args, **kwargs):
    """After a clone, permanently enable the lfs extension in the local
    destination's hgrc when the new repo carries the lfs requirement."""
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the hgrc can't be updated.
    if not repo:
        return result

    # If lfs is required for this repo, permanently enable it locally
    if 'lfs' in repo.requirements:
        repo.vfs.append('hgrc',
                        util.tonativeeol('\n[extensions]\nlfs=\n'))

    return result
244 244
def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """After a share, permanently enable the lfs extension locally when the
    destination repo carries the lfs requirement."""
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    if 'lfs' not in destrepo.requirements:
        return
    # If lfs is required for this repo, permanently enable it locally
    destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
251 251
def _prefetchfiles(repo, ctx, files):
    """Ensure that required LFS blobs are present, fetching them as a group if
    needed."""
    localstore = repo.svfs.lfslocalblobstore

    missing = []
    for f in files:
        p = pointerfromctx(ctx, f)
        if p and not localstore.has(p.oid()):
            p.filename = f
            missing.append(p)

    if missing:
        # one batched request instead of per-file round trips
        repo.svfs.lfsremoteblobstore.readbatch(missing, localstore)
266 266
def _canskipupload(repo):
    # if remotestore is a null store, upload is a no-op and can be skipped
    remote = repo.svfs.lfsremoteblobstore
    return isinstance(remote, blobstore._nullremote)
270 270
def candownload(repo):
    # if remotestore is a null store, downloads will lead to nothing
    remote = repo.svfs.lfsremoteblobstore
    return not isinstance(remote, blobstore._nullremote)
274 274
def uploadblobsfromrevs(repo, revs):
    '''upload lfs blobs introduced by revs

    Note: also used by other extensions e. g. infinitepush. avoid renaming.
    '''
    if _canskipupload(repo):
        return
    uploadblobs(repo, extractpointers(repo, revs))
284 284
def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    outgoingrevs = pushop.outgoing.missing
    return uploadblobsfromrevs(pushop.repo, outgoingrevs)
293 293
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed"""
    if 'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not (remote.local() or remote.capable('lfs')):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _("required features are not supported in the destination: %s")
            raise error.Abort(m % 'lfs',
                              hint=_('enable the lfs extension on the server'))
    return orig(repo, remote, *args, **kwargs)
307 307
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
                   *args, **kwargs):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(ui, repo, source, filename, bundletype, outgoing,
                *args, **kwargs)
314 314
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug('lfs: computing set of blobs to upload\n')
    # deduplicate by oid so each blob is uploaded at most once
    byoid = {}
    for rev in revs:
        for p in pointersfromctx(repo[rev]).values():
            byoid[p.oid()] = p
    return sorted(byoid.values())
324 324
def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    srcctx = ctx
    if f not in ctx:
        if not removed:
            return None
        # fall back to a parent that still has the file
        if f in ctx.p1():
            srcctx = ctx.p1()
        elif f in ctx.p2():
            srcctx = ctx.p2()
        else:
            return None
    fctx = srcctx[f]
    if not _islfs(fctx.filelog(), fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        # an empty dict marks "was lfs, removed from ctx"
        return p if ctx == srcctx else {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
                          % (f, short(srcctx.node()), ex))
354 354
def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    pairs = ((f, pointerfromctx(ctx, f, removed=removed))
             for f in ctx.files())
    return dict((f, p) for f, p in pairs if p is not None)
367 367
def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return
    repo.svfs.lfsremoteblobstore.writebatch(pointers,
                                            repo.svfs.lfslocalblobstore)
375 375
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    """After an upgrade's data migration, hardlink every lfs blob from the
    source store into the destination store."""
    orig(ui, srcrepo, dstrepo, requirements)

    srcvfs = srcrepo.svfs.lfslocalblobstore.vfs
    dstvfs = dstrepo.svfs.lfslocalblobstore.vfs

    for dirpath, dirs, files in srcvfs.walk():
        for oid in files:
            ui.write(_('copying lfs blob %s\n') % oid)
            lfutil.link(srcvfs.join(oid), dstvfs.join(oid))
386 386
def upgraderequirements(orig, repo):
    """Preserve the 'lfs' requirement across a repo format upgrade."""
    upgraded = orig(repo)
    if 'lfs' in repo.requirements:
        upgraded.add('lfs')
    return upgraded
General Comments 0
You need to be logged in to leave comments. Login now