lfutil.py
662 lines
| 23.2 KiB
| text/x-python
|
PythonLexer
various
|
r15168 | # Copyright 2009-2010 Gregory P. Ward | ||
# Copyright 2009-2010 Intelerad Medical Systems Incorporated | ||||
# Copyright 2010-2011 Fog Creek Software | ||||
# Copyright 2010-2011 Unity Technologies | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | ||||
'''largefiles utility code: must not import other modules in this package.''' | ||||
liscju
|
r29309 | from __future__ import absolute_import | ||
various
|
r15168 | |||
liscju
|
r29309 | import copy | ||
Augie Fackler
|
r29341 | import hashlib | ||
various
|
r15168 | import os | ||
Benjamin Pollack
|
r15320 | import platform | ||
various
|
r15168 | import stat | ||
liscju
|
r29309 | |||
from mercurial.i18n import _ | ||||
various
|
r15168 | |||
liscju
|
r29309 | from mercurial import ( | ||
dirstate, | ||||
error, | ||||
httpconnection, | ||||
liscju
|
r29320 | match as matchmod, | ||
liscju
|
r29309 | node, | ||
scmutil, | ||||
util, | ||||
) | ||||
various
|
r15168 | |||
shortname = '.hglf' | ||||
Mads Kiilerich
|
r18151 | shortnameslash = shortname + '/' | ||
various
|
r15168 | longname = 'largefiles' | ||
# -- Private worker functions ------------------------------------------ | ||||
Greg Ward
|
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum size (in MiB) above which files are largefiles.

    ``opt`` (a command-line value) wins; otherwise, when ``assumelfiles``
    is set, fall back to the ``largefiles.minsize`` config (``default``
    when unset).  Aborts if the value is not a number or still missing.
    """
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
various
|
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf:
            with util.atomictempfile(dest) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        # the copy does not carry permissions over; replicate them manually
        os.chmod(dest, os.stat(src).st_mode)
Benjamin Pollack
|
def usercachepath(ui, hash):
    """Return the path of ``hash`` inside the user-wide largefiles cache.

    This cache is shared between repositories, saving both download
    bandwidth and disk space.
    """
    return os.path.join(_usercachedir(ui), hash)
Mads Kiilerich
|
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    # explicit configuration always wins
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if os.name == 'nt':
        # Windows: prefer local app data, fall back to roaming app data
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        # macOS convention
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        # XDG base-directory spec, with ~/.cache as the usual default
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    # a known OS, but none of the expected environment variables were set
    raise error.Abort(_('unknown %s usercache location') % longname)
various
|
r15168 | |||
Benjamin Pollack
|
def inusercache(ui, hash):
    """Report whether the user cache already holds the file for ``hash``."""
    return os.path.exists(usercachepath(ui, hash))
various
|
r15168 | |||
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        # populate the repo store from the user cache (hardlink or copy)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
various
|
r15168 | |||
Na'Tosha Bard
|
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass for the largefiles dirstate: each incoming path
    is normalized to the slash-separated form (via unixpath) before being
    delegated to the regular dirstate machinery.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
various
|
r15168 | |||
Matt Harbison
|
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        # only create the store directory if there is something to track
        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
Mads Kiilerich
|
def lfdirstatestatus(lfdirstate, repo):
    '''Return the status of largefiles tracked by ``lfdirstate``.

    Files reported as "unsure" are re-checked against the hash recorded
    in their standin: unchanged ones are moved to clean (and marked
    normal in the lfdirstate), changed ones to modified.  The (mutated)
    status object is returned.'''
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    # aliases into the status object's lists; appends below mutate ``s``
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
various
|
r15168 | |||
def listlfiles(repo, rev=None, matcher=None):
    """Return the largefile names in the working copy, or in changeset
    ``rev`` when one is given."""
    if matcher is None:
        matcher = getstandinmatcher(repo)
    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
Matt Harbison
|
def instore(repo, hash, forcelocal=False):
    """Report whether a largefile with the given hash exists in the store."""
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
various
|
r15168 | |||
Matt Harbison
|
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        # shared repos keep their primary largefiles store in the share source
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
various
|
r15168 | |||
Matt Harbison
|
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)
    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)
Matt Harbison
|
r24629 | |||
various
|
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(
                util.filechunkiter(srcfd), destfd)
    # verify the copy: a hash mismatch means a corrupted cache entry
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
Benjamin Pollack
|
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for ``file`` at ``rev`` from the
    working directory into the repository store, if it is not there yet.

    ``uploaded`` is accepted for interface compatibility but unused here.'''
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
various
|
r15168 | |||
Dan Villiom Podlaski Christiansen
|
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for fname in ctx.files():
        # only standins that are actually present in this revision
        if not isstandin(fname) or fname not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(fname))
Benjamin Pollack
|
def copytostoreabsolute(repo, file, hash):
    '''Copy ``file`` (an absolute path) into the repository store under
    ``hash``, preferring a hardlink from the user cache when available.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        # atomic write so a failed copy never leaves a partial store file
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
various
|
r15168 | |||
Benjamin Pollack
|
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    into the user cache."""
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
various
|
r15168 | |||
Martin von Zweigbergk
|
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate the user's patterns into the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    # keep a reference to the standin matchfn before we replace it below
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # a path matches iff it is a standin AND its largefile name matches
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
def standin(filename):
    """Return the repo-relative standin path for largefile ``filename``.

    Notes:
    1) The result is repo-relative (not absolute) so callers can hand it
       to repo[None].add(); use repo.wjoin() when an absolute path is
       needed.
    2) It is joined with '/' because that is what dirstate always uses,
       even on Windows; any existing separators are converted first in
       case the name came from an external source (e.g. the command line).
    """
    return shortnameslash + util.pconvert(filename)
various
|
r15168 | |||
def isstandin(filename):
    """True if ``filename`` is a big file standin.

    ``filename`` must be in Mercurial's internal, slash-separated form.
    """
    return filename.startswith(shortnameslash)
various
|
r15168 | |||
def splitstandin(filename):
    '''Return the largefile name for standin ``filename``, or None when
    ``filename`` is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None
def updatestandin(repo, standin):
    '''Re-hash the working-directory largefile behind ``standin`` and
    rewrite the standin with the new hash and executable bit.

    Aborts if the largefile is missing from the working directory.'''
    file = repo.wjoin(splitstandin(standin))
    if repo.wvfs.exists(splitstandin(standin)):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % splitstandin(standin))
various
|
r15168 | |||
def readstandin(repo, filename, node=None):
    """Return the hex hash stored in the standin for ``filename``.

    Reads from changeset ``node``, or from the working directory when
    ``node`` is None.
    """
    return repo[node][standin(filename)].data().strip()
def writestandin(repo, standin, hash, executable):
    """Write ``hash`` to <repo.root>/<standin>, propagating the x-bit."""
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
various
|
r15168 | |||
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    # NOTE: seeding sha1 with a str is a Python 2 idiom; under Python 3
    # this call raises TypeError (bytes required).
    hasher = hashlib.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
various
|
r15168 | |||
def hashrepofile(repo, file):
    """Return the SHA-1 hex digest of ``file`` in the working directory."""
    return hashfile(repo.wjoin(file))
def hashfile(file):
    '''Return the SHA-1 hex digest of the contents of ``file``, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    # NOTE: sha1('') with a str seed is a Python 2 idiom; Python 3 would
    # raise TypeError here.
    hasher = hashlib.sha1('')
    with open(file, 'rb') as fd:
        # chunked read keeps memory bounded for multi-GB largefiles
        for data in util.filechunkiter(fd):
            hasher.update(data)
    return hasher.hexdigest()
def getexecutable(filename):
    """Truthy when ``filename`` is executable by user, group AND other."""
    mode = os.stat(filename).st_mode
    required = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & required) == required
various
|
r15168 | |||
def urljoin(first, second, *arg):
    """Join two or more URL components.

    At each seam exactly one separating '/' is ensured: a missing slash
    is added, and a single leading slash on the right-hand piece is
    dropped (additional slashes are preserved as-is).
    """
    def _seam(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right
    url = _seam(first, second)
    for piece in arg:
        url = _seam(url, piece)
    return url
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    # read in bounded chunks rather than slurping the whole file
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
def httpsendfile(ui, filename):
    """Open ``filename`` (binary mode) wrapped for HTTP transfer."""
    return httpconnection.httpsendfile(ui, filename, 'rb')
various
|
r15168 | |||
def unixpath(path):
    """Return ``path`` normalized and slash-separated for the lfdirstate."""
    return util.pconvert(os.path.normpath(path))
various
|
r15168 | |||
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
            any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True
    # fall back: a non-empty largefiles dirstate also marks a largefiles repo
    return any(openlfdirstate(repo.ui, repo, False))
various
|
r15168 | |||
Matt Mackall
|
class storeprotonotcapable(Exception):
    """Raised when no largefiles store supports the required protocol.

    ``storetypes`` carries the store types that were requested.
    """
    def __init__(self, storetypes):
        self.storetypes = storetypes
Na'Tosha Bard
|
r16103 | |||
Na'Tosha Bard
|
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every tracked standin;
    ``hash`` is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            # missing/unreadable standin: record the name with no hash
            hash = None
        standins.append((lfile, hash))
    return standins
Na'Tosha Bard
|
r16245 | |||
FUJIWARA Katsunori
|
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Copy the dirstate entry of ``lfile``'s standin into ``lfdirstate``.

    ``normallookup`` forces a clean ('n') entry to be recorded as
    possibly-dirty instead of clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin untracked: treat as unknown
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
FUJIWARA Katsunori
|
def markcommitted(orig, ctx, node):
    '''Wrapper around workingctx.markcommitted: after the original runs,
    sync the lfdirstate for every standin touched by ``ctx`` and copy the
    committed largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
Na'Tosha Bard
|
def getlfilestoupdate(oldstandins, newstandins):
    """Return the largefile names whose standins differ between two
    getstandinsstate() snapshots.

    ``oldstandins`` and ``newstandins`` are lists of (lfile, hash)
    pairs; a pair present in only one list marks its lfile as needing an
    update.  Each name appears at most once in the result.
    """
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track seen names in a set instead of scanning the result list
    # each time (O(n) instead of the accidental O(n^2))
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
FUJIWARA Katsunori
|
r21042 | |||
def getlfilestoupload(repo, missing, addfunc):
    '''For every revision in ``missing``, call ``addfunc(standin, hash)``
    for each standin changed in that revision (including, for merges,
    files whose content differs from either parent).'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # temporarily disable largefiles status while fetching the context;
        # restore the previous value even on failure
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # merge: also consider files removed relative to a parent or
            # whose content differs from either parent
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
FUJIWARA Katsunori
|
r23185 | |||
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # accept plain (non-largefile) files origmatchfn accepted, plus
        # the standins that were refreshed above
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn
    return match
FUJIWARA Katsunori
|
r23187 | |||
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        self.resuming = False  # avoids updating at subsequent commits
        return updatestandinsbymatch(repo, match)
FUJIWARA Katsunori
|
r23188 | |||
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE