##// END OF EJS Templates
largefiles: introduce "_lfstatuswriters" to customize status reporting...
FUJIWARA Katsunori -
r23188:94ac64bc default
parent child Browse files
Show More
@@ -1,559 +1,576 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import platform
13 13 import shutil
14 14 import stat
15 15 import copy
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19 from mercurial import node
20 20
21 21 shortname = '.hglf'
22 22 shortnameslash = shortname + '/'
23 23 longname = 'largefiles'
24 24
25 25
26 26 # -- Private worker functions ------------------------------------------
27 27
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size (in MB) as a float.

    The command line value ``opt`` wins; the ``largefiles.minsize``
    config value is consulted only when ``assumelfiles`` is set.
    Raises util.Abort when the value is non-numeric or missing.
    '''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            # no trailing '\n': util.Abort renders the message itself
            raise util.Abort(_('largefiles: size must be number (not %s)')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
41 41
def link(src, dest):
    '''Hardlink src to dest, creating parent directories as needed and
    falling back to an atomic copy (preserving the file mode) when
    hardlinking is not possible.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        # close the source explicitly: the original leaked the descriptor
        srcfd = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(srcfd):
                dst.write(chunk)
        finally:
            srcfd.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
53 53
def usercachepath(ui, hash):
    '''Return the path of ``hash`` in the per-user largefile cache, or
    None when no cache location can be determined.

    The explicit ``largefiles.usercache`` configuration wins; otherwise
    a platform-appropriate default location is used.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) application data directory
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # honour the XDG base-directory convention, then ~/.cache
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
79 79
def inusercache(ui, hash):
    '''Report whether ``hash`` is present in the per-user cache.'''
    cached = usercachepath(ui, hash)
    if not cached:
        # no cache location configured/derivable
        return cached
    return os.path.exists(cached)
83 83
def findfile(repo, hash):
    '''Locate the largefile with the given hash: prefer the local store,
    then the user cache (linking it into the store on a hit). Return
    the store path, or None when the file is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if not inusercache(repo.ui, hash):
        return None
    repo.ui.note(_('found %s in system cache\n') % hash)
    path = storepath(repo, hash)
    link(usercachepath(repo.ui, hash), path)
    return path
94 94
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass that normalizes every path to the internal
    slash-separated form (via unixpath()) before delegating to the
    base dirstate implementation.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored, whatever the ignore files say
        return False
112 112
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    With ``create``, a missing dirstate file is populated from the
    standins currently tracked by the main dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
137 137
def lfdirstatestatus(lfdirstate, repo):
    '''Return the lfdirstate status against the working directory
    parent, re-hashing "unsure" largefiles to classify them as modified
    or clean (and marking clean ones normal in the lfdirstate).'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # compare the recorded hash against the working copy content
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
154 154
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is None and repo.dirstate[f] == '?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
166 166
def instore(repo, hash):
    '''Report whether ``hash`` exists in the repo-local store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
169 169
def storepath(repo, hash):
    '''Return the repo-local store path for the given largefile hash.'''
    relative = os.path.join(longname, hash)
    return repo.join(relative)
172 172
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
187 187
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile referenced by the standin of ``file`` at
    ``rev`` into the repo-local store, unless it is already there.'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
193 193
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        if not isstandin(filename):
            continue
        if filename not in ctx.manifest():
            continue
        realfile = splitstandin(filename)
        copytostore(repo, ctx.node(), realfile)
202 202
203 203
def copytostoreabsolute(repo, file, hash):
    '''Put the largefile at absolute path ``file`` into the store under
    ``hash``, hardlinking from the user cache when possible, and link
    the stored copy back into the user cache afterwards.

    While converting (``repo._isconverting``), files absent from the
    user cache are deliberately not copied.
    '''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source explicitly: the original leaked the descriptor
        fd = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fd):
                dst.write(chunk)
        finally:
            fd.close()
        dst.close()
        linktousercache(repo, hash)
215 215
def linktousercache(repo, hash):
    '''Link the stored copy of ``hash`` into the user cache, when a
    user cache location is available.'''
    cachedpath = usercachepath(repo.ui, hash)
    if cachedpath:
        link(storepath(repo, hash), cachedpath)
220 220
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # avoid mutable default arguments ([] / {}): a caller mutating them
    # would silently poison every later call
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
233 233
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # renamed from 'isstandin': the old local shadowed the module-level
    # isstandin() function
    standinmatch = smatcher.matchfn
    def composedmatchfn(f):
        # accept f iff it is a standin whose largefile matches rmatcher
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
245 245
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file (i.e. the original path prefixed with '.hglf/').'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
257 257
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename[:len(prefix)] == prefix
262 262
def splitstandin(filename):
    '''Return the largefile path for a standin path, or None when the
    path is not a standin.'''
    # Work with '/'-separated paths: dirstate always uses '/', even on
    # Windows; callers may pass OS-native names (e.g. command line).
    normed = util.pconvert(filename)
    if normed.startswith(shortnameslash):
        return normed[len(shortnameslash):]
    return None
272 272
def updatestandin(repo, standin):
    '''Re-hash the largefile corresponding to ``standin`` and rewrite
    the standin's content, if the largefile exists in the working
    copy (otherwise do nothing).'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
279 279
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
284 284
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    if executable:
        flags = 'x'
    else:
        flags = ''
    repo.wwrite(standin, hash + '\n', flags)
288 288
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for block in instream:
        hasher.update(block)
        outfile.write(block)
    return hasher.hexdigest()
297 297
def hashrepofile(repo, file):
    '''Return the content hash of the repo-relative ``file``.'''
    abspath = repo.wjoin(file)
    return hashfile(abspath)
300 300
def hashfile(file):
    '''Return the hex SHA-1 hash of the content of ``file``, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # ensure the descriptor is closed even if reading fails
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        fd.close()
    return hasher.hexdigest()
310 310
def getexecutable(filename):
    '''Report (as 0/1) whether ``filename`` has all three execute bits
    (user, group and other) set.'''
    mode = os.stat(filename).st_mode
    for bit in (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH):
        if not mode & bit:
            return mode & bit   # 0: this execute bit is missing
    return mode & stat.S_IXOTH  # 1: all execute bits are set
316 316
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one '/'
    between adjacent components.'''
    def _join(left, right):
        if not left.endswith('/'):
            left = left + '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
329 329
def hexsha1(data):
    """Return the hex-encoded SHA-1 digest of the content of the
    file-like object ``data``."""
    hasher = util.sha1()
    for block in util.filechunkiter(data):
        hasher.update(block)
    return hasher.hexdigest()
337 337
def httpsendfile(ui, filename):
    '''Wrap ``filename`` (opened for binary reading) in an
    httpsendfile object suitable for HTTP upload.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
340 340
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normed = os.path.normpath(path)
    return util.pconvert(normed)
344 344
def islfilesrepo(repo):
    '''Report whether ``repo`` uses largefiles: either the requirement
    is recorded and largefile revlogs exist in the store, or the
    largefiles dirstate tracks at least one file.'''
    if ('largefiles' in repo.requirements and
        util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # a repo may contain largefiles before the requirement is recorded
    return util.any(openlfdirstate(repo.ui, repo, False))
351 351
class storeprotonotcapable(Exception):
    '''Raised when no store implementation supports any of the
    required store types (kept in ``storetypes``).'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
355 355
def getstandinsstate(repo):
    '''Return a list of (largefile, hash) pairs for every standin known
    to the dirstate; hash is None when the standin cannot be read.'''
    result = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        result.append((lfile, hash))
    return result
367 367
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Synchronize the lfdirstate entry of ``lfile`` with the state of
    its standin in the main dirstate.

    If ``normallookup`` is true, a clean ('n') standin is recorded as
    "possibly dirty" rather than clean.
    '''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # dirstate entry tuple is (state, mode, size, mtime)
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if normallookup or mtime < 0:
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
389 389
def markcommitted(orig, ctx, node):
    '''Wrapper for ctx.markcommitted(): after delegating to ``orig``,
    sync the largefiles dirstate for every standin touched by the
    committed changeset.'''
    repo = ctx._repo

    orig(node)

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()
401 401
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) standin entries
    differ between ``oldstandins`` and ``newstandins``.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track seen names in a set: the original scanned the result list,
    # which is quadratic in the number of changed standins
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
409 409
def getlfilestoupload(repo, missing, addfunc):
    '''Call ``addfunc(standin, hash)`` for every largefile standin
    present in the changesets listed in ``missing``.

    For merge changesets, files removed or changed relative to either
    parent are considered as well, because ctx.files() does not report
    them.
    '''
    for n in missing:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            # files present in a parent but dropped in the merge
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            # files whose merged version differs from either parent
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
431 431
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 0: Automated committing
    #
    # While automated committing (like rebase, transplant
    # and so on), this code path is used to avoid:
    #   (1) updating standins, because standins should
    #       be already updated at this point
    #   (2) aborting when standins are matched by "match",
    #       because automated committing may specify them directly
    #
    if getattr(repo, "_istransplanting", False):
        return match

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if os.path.exists(
                        repo.wjoin(standin(lfile))):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if os.path.exists(repo.wjoin(lfile)):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        # skip standins whose largefile is marked for removal
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # ignore known largefiles and standins
        if f in lfiles or fstandin in standins:
            continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # matched largefiles are committed via their standins, so the
        # largefile name itself must not match; unmatched standins are
        # accepted when they belong to the requested big files
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
539 539
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        # true only until the first commit after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
560
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    # NOTE: the docstring previously referred to a non-existent
    # ``repo._lfupdatereporters``; the attribute is ``_lfstatuswriters``.
    if forcibly is None:
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,362 +1,367 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import os
12 12
13 13 from mercurial import error, manifest, match as match_, util
14 14 from mercurial.i18n import _
15 15 from mercurial import localrepo, scmutil
16 16
17 17 import lfcommands
18 18 import lfutil
19 19
20 20 def reposetup(ui, repo):
21 21 # wire repositories should be given new wireproto functions
22 22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 23 if not repo.local():
24 24 return
25 25
26 26 class lfilesrepo(repo.__class__):
27 27 lfstatus = False
        def status_nolfiles(self, *args, **kwargs):
            '''Plain status() from the base class, bypassing the
            largefiles processing regardless of self.lfstatus.'''
            return super(lfilesrepo, self).status(*args, **kwargs)
30 30
31 31 # When lfstatus is set, return a context that gives the names
32 32 # of largefiles instead of their corresponding standins and
33 33 # identifies the largefiles as always binary, regardless of
34 34 # their actual contents.
        def __getitem__(self, changeid):
            '''Return the changectx for ``changeid``.

            When self.lfstatus is set, the context is wrapped so that
            largefiles masquerade as their standins: files() reports
            largefile names, manifest() answers membership for both
            forms, and filectx() falls back to the standin, marking its
            data as binary.
            '''
            ctx = super(lfilesrepo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfilesmanifestdict(manifest.manifestdict):
                    def __contains__(self, filename):
                        orig = super(lfilesmanifestdict, self).__contains__
                        return orig(filename) or orig(lfutil.standin(filename))
                class lfilesctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfilesctx, self).files()
                        return [lfutil.splitstandin(f) or f for f in filenames]
                    def manifest(self):
                        man1 = super(lfilesctx, self).manifest()
                        man1.__class__ = lfilesmanifestdict
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        orig = super(lfilesctx, self).filectx
                        try:
                            if filelog is not None:
                                result = orig(path, fileid, filelog)
                            else:
                                result = orig(path, fileid)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            if filelog is not None:
                                result = orig(lfutil.standin(path), fileid,
                                              filelog)
                            else:
                                result = orig(lfutil.standin(path), fileid)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfilesctx
            return ctx
70 70
71 71 # Figure out the status of big files and insert them into the
72 72 # appropriate list in the result. Also removes standin files
73 73 # from the listing. Revert to the original status if
74 74 # self.lfstatus is False.
75 75 # XXX large file status is buggy when used on repo proxy.
76 76 # XXX this needs to be investigated.
77 77 @localrepo.unfilteredmethod
78 78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 79 clean=False, unknown=False, listsubrepos=False):
80 80 listignored, listclean, listunknown = ignored, clean, unknown
81 81 orig = super(lfilesrepo, self).status
82 82 if not self.lfstatus:
83 83 return orig(node1, node2, match, listignored, listclean,
84 84 listunknown, listsubrepos)
85 85
86 86 # some calls in this function rely on the old version of status
87 87 self.lfstatus = False
88 88 ctx1 = self[node1]
89 89 ctx2 = self[node2]
90 90 working = ctx2.rev() is None
91 91 parentworking = working and ctx1 == self['.']
92 92
93 93 if match is None:
94 94 match = match_.always(self.root, self.getcwd())
95 95
96 96 wlock = None
97 97 try:
98 98 try:
99 99 # updating the dirstate is optional
100 100 # so we don't wait on the lock
101 101 wlock = self.wlock(False)
102 102 except error.LockError:
103 103 pass
104 104
105 105 # First check if paths or patterns were specified on the
106 106 # command line. If there were, and they don't match any
107 107 # largefiles, we should just bail here and let super
108 108 # handle it -- thus gaining a big performance boost.
109 109 lfdirstate = lfutil.openlfdirstate(ui, self)
110 110 if not match.always():
111 111 for f in lfdirstate:
112 112 if match(f):
113 113 break
114 114 else:
115 115 return orig(node1, node2, match, listignored, listclean,
116 116 listunknown, listsubrepos)
117 117
118 118 # Create a copy of match that matches standins instead
119 119 # of largefiles.
120 120 def tostandins(files):
121 121 if not working:
122 122 return files
123 123 newfiles = []
124 124 dirstate = self.dirstate
125 125 for f in files:
126 126 sf = lfutil.standin(f)
127 127 if sf in dirstate:
128 128 newfiles.append(sf)
129 129 elif sf in dirstate.dirs():
130 130 # Directory entries could be regular or
131 131 # standin, check both
132 132 newfiles.extend((f, sf))
133 133 else:
134 134 newfiles.append(f)
135 135 return newfiles
136 136
137 137 m = copy.copy(match)
138 138 m._files = tostandins(m._files)
139 139
140 140 result = orig(node1, node2, m, ignored, clean, unknown,
141 141 listsubrepos)
142 142 if working:
143 143
144 144 def sfindirstate(f):
145 145 sf = lfutil.standin(f)
146 146 dirstate = self.dirstate
147 147 return sf in dirstate or sf in dirstate.dirs()
148 148
149 149 match._files = [f for f in match._files
150 150 if sfindirstate(f)]
151 151 # Don't waste time getting the ignored and unknown
152 152 # files from lfdirstate
153 153 unsure, s = lfdirstate.status(match, [], False, listclean,
154 154 False)
155 155 (modified, added, removed, clean) = (s.modified, s.added,
156 156 s.removed, s.clean)
157 157 if parentworking:
158 158 for lfile in unsure:
159 159 standin = lfutil.standin(lfile)
160 160 if standin not in ctx1:
161 161 # from second parent
162 162 modified.append(lfile)
163 163 elif ctx1[standin].data().strip() \
164 164 != lfutil.hashfile(self.wjoin(lfile)):
165 165 modified.append(lfile)
166 166 else:
167 167 if listclean:
168 168 clean.append(lfile)
169 169 lfdirstate.normal(lfile)
170 170 else:
171 171 tocheck = unsure + modified + added + clean
172 172 modified, added, clean = [], [], []
173 173
174 174 for lfile in tocheck:
175 175 standin = lfutil.standin(lfile)
176 176 if standin in ctx1:
177 177 abslfile = self.wjoin(lfile)
178 178 if ((ctx1[standin].data().strip() !=
179 179 lfutil.hashfile(abslfile)) or
180 180 (('x' in ctx1.flags(standin)) !=
181 181 bool(lfutil.getexecutable(abslfile)))):
182 182 modified.append(lfile)
183 183 elif listclean:
184 184 clean.append(lfile)
185 185 else:
186 186 added.append(lfile)
187 187
188 188 # at this point, 'removed' contains largefiles
189 189 # marked as 'R' in the working context.
190 190 # then, largefiles not managed also in the target
191 191 # context should be excluded from 'removed'.
192 192 removed = [lfile for lfile in removed
193 193 if lfutil.standin(lfile) in ctx1]
194 194
195 195 # Standins no longer found in lfdirstate has been
196 196 # removed
197 197 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
198 198 lfile = lfutil.splitstandin(standin)
199 199 if not match(lfile):
200 200 continue
201 201 if lfile not in lfdirstate:
202 202 removed.append(lfile)
203 203
204 204 # Filter result lists
205 205 result = list(result)
206 206
207 207 # Largefiles are not really removed when they're
208 208 # still in the normal dirstate. Likewise, normal
209 209 # files are not really removed if they are still in
210 210 # lfdirstate. This happens in merges where files
211 211 # change type.
212 212 removed = [f for f in removed
213 213 if f not in self.dirstate]
214 214 result[2] = [f for f in result[2]
215 215 if f not in lfdirstate]
216 216
217 217 lfiles = set(lfdirstate._map)
218 218 # Unknown files
219 219 result[4] = set(result[4]).difference(lfiles)
220 220 # Ignored files
221 221 result[5] = set(result[5]).difference(lfiles)
222 222 # combine normal files and largefiles
223 223 normals = [[fn for fn in filelist
224 224 if not lfutil.isstandin(fn)]
225 225 for filelist in result]
226 226 lfstatus = (modified, added, removed, s.deleted, [], [],
227 227 clean)
228 228 result = [sorted(list1 + list2)
229 229 for (list1, list2) in zip(normals, lfstatus)]
230 230 else: # not against working directory
231 231 result = [[lfutil.splitstandin(f) or f for f in items]
232 232 for items in result]
233 233
234 234 if wlock:
235 235 lfdirstate.write()
236 236
237 237 finally:
238 238 if wlock:
239 239 wlock.release()
240 240
241 241 self.lfstatus = True
242 242 return scmutil.status(*result)
243 243
244 244 # As part of committing, copy all of the largefiles into the
245 245 # cache.
        def commitctx(self, ctx, *args, **kwargs):
            '''Commit via the base class, then copy the committed
            largefiles into the store and hook markcommitted() on the
            context so the largefiles dirstate is synced afterwards.'''
            node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
            lfutil.copyalltostore(self, node)
            class lfilesctx(ctx.__class__):
                def markcommitted(self, node):
                    orig = super(lfilesctx, self).markcommitted
                    return lfutil.markcommitted(orig, self, node)
            ctx.__class__ = lfilesctx
            return node
255 255
256 256 # Before commit, largefile standins have not had their
257 257 # contents updated to reflect the hash of their largefile.
258 258 # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
            '''Run the innermost largefile commit hook (which refreshes
            standins and may adjust ``match``) under wlock, then
            delegate to the base class commit.'''
            orig = super(lfilesrepo, self).commit

            wlock = self.wlock()
            try:
                # only the last element of the hook stack applies
                lfcommithook = self._lfcommithooks[-1]
                match = lfcommithook(self, match)
                result = orig(text=text, user=user, date=date, match=match,
                              force=force, editor=editor, extra=extra)
                return result
            finally:
                wlock.release()
272 272
        def push(self, remote, force=False, revs=None, newbranch=False):
            '''Abort pushes to local destinations lacking a required
            feature (e.g. largefiles support), then delegate to the
            base class push.'''
            if remote.local():
                missing = set(self.requirements) - remote.local().supported
                if missing:
                    msg = _("required features are not"
                            " supported in the destination:"
                            " %s") % (', '.join(sorted(missing)))
                    raise util.Abort(msg)
            return super(lfilesrepo, self).push(remote, force=force, revs=revs,
                newbranch=newbranch)
283 283
284 284 # TODO: _subdirlfs should be moved into "lfutil.py", because
285 285 # it is referred only from "lfutil.updatestandinsbymatch"
        # TODO: _subdirlfs should be moved into "lfutil.py", because
        # it is referred only from "lfutil.updatestandinsbymatch"
        def _subdirlfs(self, files, lfiles):
            '''
            Adjust matched file list
            If we pass a directory to commit whose only commitable files
            are largefiles, the core commit code aborts before finding
            the largefiles.
            So we do the following:
            For directories that only have largefiles as matches,
            we explicitly add the largefiles to the match list and remove
            the directory.
            In other cases, we leave the match list unmodified.
            '''
            actualfiles = []
            dirs = []
            regulars = []

            for f in files:
                if lfutil.isstandin(f + '/'):
                    raise util.Abort(
                        _('file "%s" is a largefile standin') % f,
                        hint=('commit the largefile itself instead'))
                # Scan directories
                if os.path.isdir(self.wjoin(f)):
                    dirs.append(f)
                else:
                    regulars.append(f)

            for f in dirs:
                matcheddir = False
                d = self.dirstate.normalize(f) + '/'
                # Check for matched normal files
                for mf in regulars:
                    if self.dirstate.normalize(mf).startswith(d):
                        actualfiles.append(f)
                        matcheddir = True
                        break
                if not matcheddir:
                    # If no normal match, manually append
                    # any matching largefiles
                    for lf in lfiles:
                        if self.dirstate.normalize(lf).startswith(d):
                            actualfiles.append(lf)
                            if not matcheddir:
                                # keep one standin so the directory is
                                # still seen as non-empty by commit
                                actualfiles.append(lfutil.standin(f))
                                matcheddir = True
                    # Nothing in dir, so readd it
                    # and let commit reject it
                    if not matcheddir:
                        actualfiles.append(f)

            # Always add normal files
            actualfiles += regulars
            return actualfiles
339 339
340 340 repo.__class__ = lfilesrepo
341 341
342 342 # stack of hooks being executed before committing.
343 343 # only last element ("_lfcommithooks[-1]") is used for each committing.
344 344 repo._lfcommithooks = [lfutil.updatestandinsbymatch]
345 345
    # Stack of status writer functions taking "*msg, **opts" arguments
    # like "ui.status()". Only the last element ("_lfstatuswriters[-1]")
    # is used to write status out.
349 repo._lfstatuswriters = [ui.status]
350
    def prepushoutgoinghook(local, remote, outgoing):
        '''Upload the largefiles referenced by outgoing changesets
        before the changesets themselves are pushed.'''
        if outgoing.missing:
            toupload = set()
            addfunc = lambda fn, lfhash: toupload.add(lfhash)
            lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
            lfcommands.uploadlfiles(ui, local, remote, toupload)
352 357 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
353 358
    def checkrequireslfiles(ui, repo, **kwargs):
        '''Hook: record the 'largefiles' requirement as soon as any
        largefile revlog appears in the store.'''
        if 'largefiles' not in repo.requirements and util.any(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()
359 364
360 365 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
361 366 'largefiles')
362 367 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
General Comments 0
You need to be logged in to leave comments. Login now