# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
import errno
import os
import platform
import shutil
import stat
import tempfile

from mercurial import dirstate, httpconnection, match as match_, util, scmutil
from mercurial.i18n import _
shortname = '.hglf' | ||||
longname = 'largefiles' | ||||
# -- Portability wrappers ---------------------------------------------- | ||||
Na'Tosha Bard
|
r15224 | def dirstate_walk(dirstate, matcher, unknown=False, ignored=False): | ||
return dirstate.walk(matcher, [], unknown, ignored) | ||||
various
|
r15168 | |||
def repo_add(repo, list): | ||||
Na'Tosha Bard
|
r15224 | add = repo[None].add | ||
various
|
r15168 | return add(list) | ||
def repo_remove(repo, list, unlink=False): | ||||
Na'Tosha Bard
|
r15224 | def remove(list, unlink): | ||
wlock = repo.wlock() | ||||
various
|
r15168 | try: | ||
Na'Tosha Bard
|
r15224 | if unlink: | ||
for f in list: | ||||
try: | ||||
util.unlinkpath(repo.wjoin(f)) | ||||
except OSError, inst: | ||||
if inst.errno != errno.ENOENT: | ||||
raise | ||||
repo[None].forget(list) | ||||
finally: | ||||
wlock.release() | ||||
various
|
r15168 | return remove(list, unlink=unlink) | ||
def repo_forget(repo, list): | ||||
Na'Tosha Bard
|
r15224 | forget = repo[None].forget | ||
various
|
r15168 | return forget(list) | ||
def findoutgoing(repo, remote, force): | ||||
Na'Tosha Bard
|
r15224 | from mercurial import discovery | ||
common, _anyinc, _heads = discovery.findcommonincoming(repo, | ||||
remote, force=force) | ||||
return repo.changelog.findmissing(common) | ||||
various
|
r15168 | |||
# -- Private worker functions ------------------------------------------ | ||||
Greg Ward
|
r15227 | def getminsize(ui, assumelfiles, opt, default=10): | ||
lfsize = opt | ||||
if not lfsize and assumelfiles: | ||||
Greg Ward
|
r15304 | lfsize = ui.config(longname, 'minsize', default=default) | ||
Greg Ward
|
r15227 | if lfsize: | ||
try: | ||||
Greg Ward
|
r15228 | lfsize = float(lfsize) | ||
Greg Ward
|
r15227 | except ValueError: | ||
Greg Ward
|
r15228 | raise util.Abort(_('largefiles: size must be number (not %s)\n') | ||
Greg Ward
|
r15227 | % lfsize) | ||
if lfsize is None: | ||||
raise util.Abort(_('minimum size for largefiles must be specified')) | ||||
return lfsize | ||||
various
|
r15168 | def link(src, dest): | ||
try: | ||||
Na'Tosha Bard
|
r15206 | util.oslink(src, dest) | ||
various
|
r15168 | except OSError: | ||
Martin Geisler
|
r15572 | # if hardlinks fail, fallback on atomic copy | ||
dst = util.atomictempfile(dest) | ||||
Matt Mackall
|
r15699 | for chunk in util.filechunkiter(open(src, 'rb')): | ||
Martin Geisler
|
r15572 | dst.write(chunk) | ||
dst.close() | ||||
various
|
r15168 | os.chmod(dest, os.stat(src).st_mode) | ||
Benjamin Pollack
|
r15316 | def usercachepath(ui, hash): | ||
Greg Ward
|
r15350 | path = ui.configpath(longname, 'usercache', None) | ||
various
|
r15168 | if path: | ||
path = os.path.join(path, hash) | ||||
else: | ||||
if os.name == 'nt': | ||||
Greg Ward
|
r15255 | appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA')) | ||
Kevin Gessner
|
r15658 | if appdata: | ||
path = os.path.join(appdata, longname, hash) | ||||
Benjamin Pollack
|
r15320 | elif platform.system() == 'Darwin': | ||
Kevin Gessner
|
r15658 | home = os.getenv('HOME') | ||
if home: | ||||
path = os.path.join(home, 'Library', 'Caches', | ||||
longname, hash) | ||||
various
|
r15168 | elif os.name == 'posix': | ||
Benjamin Pollack
|
r15320 | path = os.getenv('XDG_CACHE_HOME') | ||
if path: | ||||
path = os.path.join(path, longname, hash) | ||||
else: | ||||
Kevin Gessner
|
r15658 | home = os.getenv('HOME') | ||
if home: | ||||
path = os.path.join(home, '.cache', longname, hash) | ||||
various
|
r15168 | else: | ||
Greg Ward
|
r15253 | raise util.Abort(_('unknown operating system: %s\n') % os.name) | ||
various
|
r15168 | return path | ||
Benjamin Pollack
|
r15316 | def inusercache(ui, hash): | ||
Kevin Gessner
|
r15658 | path = usercachepath(ui, hash) | ||
return path and os.path.exists(path) | ||||
various
|
r15168 | |||
def findfile(repo, hash): | ||||
Benjamin Pollack
|
r15316 | if instore(repo, hash): | ||
repo.ui.note(_('Found %s in store\n') % hash) | ||||
Na'Tosha Bard
|
r15913 | return storepath(repo, hash) | ||
Benjamin Pollack
|
r15317 | elif inusercache(repo.ui, hash): | ||
various
|
r15168 | repo.ui.note(_('Found %s in system cache\n') % hash) | ||
Hao Lian
|
r15408 | path = storepath(repo, hash) | ||
util.makedirs(os.path.dirname(path)) | ||||
link(usercachepath(repo.ui, hash), path) | ||||
Na'Tosha Bard
|
r15913 | return path | ||
return None | ||||
various
|
r15168 | |||
class largefiles_dirstate(dirstate.dirstate): | ||||
def __getitem__(self, key): | ||||
return super(largefiles_dirstate, self).__getitem__(unixpath(key)) | ||||
def normal(self, f): | ||||
return super(largefiles_dirstate, self).normal(unixpath(f)) | ||||
def remove(self, f): | ||||
return super(largefiles_dirstate, self).remove(unixpath(f)) | ||||
def add(self, f): | ||||
return super(largefiles_dirstate, self).add(unixpath(f)) | ||||
def drop(self, f): | ||||
return super(largefiles_dirstate, self).drop(unixpath(f)) | ||||
def forget(self, f): | ||||
return super(largefiles_dirstate, self).forget(unixpath(f)) | ||||
Na'Tosha Bard
|
r15793 | def normallookup(self, f): | ||
return super(largefiles_dirstate, self).normallookup(unixpath(f)) | ||||
various
|
r15168 | |||
def openlfdirstate(ui, repo): | ||||
''' | ||||
Greg Ward
|
r15252 | Return a dirstate object that tracks largefiles: i.e. its root is | ||
the repo root, but it is saved in .hg/largefiles/dirstate. | ||||
various
|
r15168 | ''' | ||
admin = repo.join(longname) | ||||
Na'Tosha Bard
|
r15224 | opener = scmutil.opener(admin) | ||
Greg Ward
|
r15349 | lfdirstate = largefiles_dirstate(opener, ui, repo.root, | ||
repo.dirstate._validate) | ||||
various
|
r15168 | |||
Greg Ward
|
r15252 | # If the largefiles dirstate does not exist, populate and create | ||
# it. This ensures that we create it on the first meaningful | ||||
Levi Bard
|
r15794 | # largefiles operation in a new clone. | ||
various
|
r15168 | if not os.path.exists(os.path.join(admin, 'dirstate')): | ||
util.makedirs(admin) | ||||
matcher = getstandinmatcher(repo) | ||||
for standin in dirstate_walk(repo.dirstate, matcher): | ||||
lfile = splitstandin(standin) | ||||
hash = readstandin(repo, lfile) | ||||
lfdirstate.normallookup(lfile) | ||||
try: | ||||
Mads Kiilerich
|
r15553 | if hash == hashfile(repo.wjoin(lfile)): | ||
various
|
r15168 | lfdirstate.normal(lfile) | ||
Martin Geisler
|
r15548 | except OSError, err: | ||
various
|
r15168 | if err.errno != errno.ENOENT: | ||
raise | ||||
return lfdirstate | ||||
def lfdirstate_status(lfdirstate, repo, rev): | ||||
Levi Bard
|
r15794 | match = match_.always(repo.root, repo.getcwd()) | ||
s = lfdirstate.status(match, [], False, False, False) | ||||
unsure, modified, added, removed, missing, unknown, ignored, clean = s | ||||
for lfile in unsure: | ||||
if repo[rev][standin(lfile)].data().strip() != \ | ||||
hashfile(repo.wjoin(lfile)): | ||||
modified.append(lfile) | ||||
else: | ||||
clean.append(lfile) | ||||
lfdirstate.normal(lfile) | ||||
various
|
r15168 | return (modified, added, removed, missing, unknown, ignored, clean) | ||
def listlfiles(repo, rev=None, matcher=None): | ||||
Greg Ward
|
r15252 | '''return a list of largefiles in the working copy or the | ||
specified changeset''' | ||||
various
|
r15168 | |||
if matcher is None: | ||||
matcher = getstandinmatcher(repo) | ||||
# ignore unknown files in working directory | ||||
Greg Ward
|
r15255 | return [splitstandin(f) | ||
for f in repo[rev].walk(matcher) | ||||
various
|
r15168 | if rev is not None or repo.dirstate[f] != '?'] | ||
Benjamin Pollack
|
r15316 | def instore(repo, hash): | ||
return os.path.exists(storepath(repo, hash)) | ||||
various
|
r15168 | |||
Benjamin Pollack
|
r15316 | def storepath(repo, hash): | ||
various
|
r15168 | return repo.join(os.path.join(longname, hash)) | ||
def copyfromcache(repo, hash, filename): | ||||
Greg Ward
|
r15252 | '''Copy the specified largefile from the repo or system cache to | ||
filename in the repository. Return true on success or false if the | ||||
file was not found in either cache (which should not happened: | ||||
this is meant to be called only after ensuring that the needed | ||||
largefile exists in the cache).''' | ||||
various
|
r15168 | path = findfile(repo, hash) | ||
if path is None: | ||||
return False | ||||
util.makedirs(os.path.dirname(repo.wjoin(filename))) | ||||
Martin Geisler
|
r15570 | # The write may fail before the file is fully written, but we | ||
# don't use atomic writes in the working copy. | ||||
various
|
r15168 | shutil.copy(path, repo.wjoin(filename)) | ||
return True | ||||
Benjamin Pollack
|
r15316 | def copytostore(repo, rev, file, uploaded=False): | ||
various
|
r15168 | hash = readstandin(repo, file) | ||
Benjamin Pollack
|
r15316 | if instore(repo, hash): | ||
various
|
r15168 | return | ||
Benjamin Pollack
|
r15316 | copytostoreabsolute(repo, repo.wjoin(file), hash) | ||
various
|
r15168 | |||
Dan Villiom Podlaski Christiansen
|
r15796 | def copyalltostore(repo, node): | ||
'''Copy all largefiles in a given revision to the store''' | ||||
ctx = repo[node] | ||||
for filename in ctx.files(): | ||||
if isstandin(filename) and filename in ctx.manifest(): | ||||
realfile = splitstandin(filename) | ||||
copytostore(repo, ctx.node(), realfile) | ||||
Benjamin Pollack
|
r15316 | def copytostoreabsolute(repo, file, hash): | ||
Hao Lian
|
r15371 | util.makedirs(os.path.dirname(storepath(repo, hash))) | ||
Benjamin Pollack
|
r15316 | if inusercache(repo.ui, hash): | ||
link(usercachepath(repo.ui, hash), storepath(repo, hash)) | ||||
various
|
r15168 | else: | ||
Martin Geisler
|
r16153 | dst = util.atomictempfile(storepath(repo, hash), | ||
createmode=repo.store.createmode) | ||||
Matt Mackall
|
r15699 | for chunk in util.filechunkiter(open(file, 'rb')): | ||
Martin Geisler
|
r15571 | dst.write(chunk) | ||
dst.close() | ||||
Benjamin Pollack
|
r15316 | linktousercache(repo, hash) | ||
various
|
r15168 | |||
Benjamin Pollack
|
r15316 | def linktousercache(repo, hash): | ||
Kevin Gessner
|
r15658 | path = usercachepath(repo.ui, hash) | ||
if path: | ||||
util.makedirs(os.path.dirname(path)) | ||||
link(storepath(repo, hash), path) | ||||
various
|
r15168 | |||
def getstandinmatcher(repo, pats=[], opts={}): | ||||
'''Return a match object that applies pats to the standin directory''' | ||||
standindir = repo.pathto(shortname) | ||||
if pats: | ||||
# patterns supplied: search standin directory relative to current dir | ||||
cwd = repo.getcwd() | ||||
if os.path.isabs(cwd): | ||||
# cwd is an absolute path for hg -R <reponame> | ||||
# work relative to the repository root in this case | ||||
cwd = '' | ||||
pats = [os.path.join(standindir, cwd, pat) for pat in pats] | ||||
elif os.path.isdir(standindir): | ||||
# no patterns: relative to repo root | ||||
pats = [standindir] | ||||
else: | ||||
# no patterns and no standin dir: return matcher that matches nothing | ||||
match = match_.match(repo.root, None, [], exact=True) | ||||
match.matchfn = lambda f: False | ||||
return match | ||||
return getmatcher(repo, pats, opts, showbad=False) | ||||
def getmatcher(repo, pats=[], opts={}, showbad=True): | ||||
Greg Ward
|
r15252 | '''Wrapper around scmutil.match() that adds showbad: if false, | ||
neuter the match object's bad() method so it does not print any | ||||
warnings about missing files or directories.''' | ||||
Na'Tosha Bard
|
r15224 | match = scmutil.match(repo[None], pats, opts) | ||
various
|
r15168 | |||
if not showbad: | ||||
match.bad = lambda f, msg: None | ||||
return match | ||||
def composestandinmatcher(repo, rmatcher): | ||||
Greg Ward
|
r15252 | '''Return a matcher that accepts standins corresponding to the | ||
files accepted by rmatcher. Pass the list of files in the matcher | ||||
as the paths specified by the user.''' | ||||
various
|
r15168 | smatcher = getstandinmatcher(repo, rmatcher.files()) | ||
isstandin = smatcher.matchfn | ||||
def composed_matchfn(f): | ||||
return isstandin(f) and rmatcher.matchfn(splitstandin(f)) | ||||
smatcher.matchfn = composed_matchfn | ||||
return smatcher | ||||
def standin(filename): | ||||
'''Return the repo-relative path to the standin for the specified big | ||||
file.''' | ||||
# Notes: | ||||
# 1) Most callers want an absolute path, but _create_standin() needs | ||||
# it repo-relative so lfadd() can pass it to repo_add(). So leave | ||||
# it up to the caller to use repo.wjoin() to get an absolute path. | ||||
# 2) Join with '/' because that's what dirstate always uses, even on | ||||
# Windows. Change existing separator to '/' first in case we are | ||||
# passed filenames from an external source (like the command line). | ||||
FUJIWARA Katsunori
|
r16066 | return shortname + '/' + util.pconvert(filename) | ||
various
|
r15168 | |||
def isstandin(filename): | ||||
Greg Ward
|
r15252 | '''Return true if filename is a big file standin. filename must be | ||
in Mercurial's internal form (slash-separated).''' | ||||
various
|
r15168 | return filename.startswith(shortname + '/') | ||
def splitstandin(filename): | ||||
# Split on / because that's what dirstate always uses, even on Windows. | ||||
# Change local separator to / first just in case we are passed filenames | ||||
# from an external source (like the command line). | ||||
FUJIWARA Katsunori
|
r16066 | bits = util.pconvert(filename).split('/', 1) | ||
various
|
r15168 | if len(bits) == 2 and bits[0] == shortname: | ||
return bits[1] | ||||
else: | ||||
return None | ||||
def updatestandin(repo, standin): | ||||
file = repo.wjoin(splitstandin(standin)) | ||||
if os.path.exists(file): | ||||
hash = hashfile(file) | ||||
executable = getexecutable(file) | ||||
writestandin(repo, standin, hash, executable) | ||||
def readstandin(repo, filename, node=None): | ||||
'''read hex hash from standin for filename at given node, or working | ||||
directory if no node is given''' | ||||
return repo[node][standin(filename)].data().strip() | ||||
def writestandin(repo, standin, hash, executable): | ||||
Greg Ward
|
r15252 | '''write hash to <repo.root>/<standin>''' | ||
various
|
r15168 | writehash(hash, repo.wjoin(standin), executable) | ||
def copyandhash(instream, outfile): | ||||
'''Read bytes from instream (iterable) and write them to outfile, | ||||
computing the SHA-1 hash of the data along the way. Close outfile | ||||
when done and return the binary hash.''' | ||||
hasher = util.sha1('') | ||||
for data in instream: | ||||
hasher.update(data) | ||||
outfile.write(data) | ||||
# Blecch: closing a file that somebody else opened is rude and | ||||
Greg Ward
|
r15252 | # wrong. But it's so darn convenient and practical! After all, | ||
various
|
r15168 | # outfile was opened just to copy and hash. | ||
outfile.close() | ||||
return hasher.digest() | ||||
def hashrepofile(repo, file): | ||||
return hashfile(repo.wjoin(file)) | ||||
def hashfile(file): | ||||
if not os.path.exists(file): | ||||
return '' | ||||
hasher = util.sha1('') | ||||
fd = open(file, 'rb') | ||||
for data in blockstream(fd): | ||||
hasher.update(data) | ||||
fd.close() | ||||
return hasher.hexdigest() | ||||
class limitreader(object): | ||||
def __init__(self, f, limit): | ||||
self.f = f | ||||
self.limit = limit | ||||
def read(self, length): | ||||
if self.limit == 0: | ||||
return '' | ||||
length = length > self.limit and self.limit or length | ||||
self.limit -= length | ||||
return self.f.read(length) | ||||
def close(self): | ||||
pass | ||||
def blockstream(infile, blocksize=128 * 1024): | ||||
"""Generator that yields blocks of data from infile and closes infile.""" | ||||
while True: | ||||
data = infile.read(blocksize) | ||||
if not data: | ||||
break | ||||
yield data | ||||
Greg Ward
|
r15252 | # same blecch as copyandhash() above | ||
various
|
r15168 | infile.close() | ||
def writehash(hash, filename, executable): | ||||
util.makedirs(os.path.dirname(filename)) | ||||
Martin Geisler
|
r15574 | util.writefile(filename, hash + '\n') | ||
os.chmod(filename, getmode(executable)) | ||||
various
|
r15168 | |||
def getexecutable(filename): | ||||
mode = os.stat(filename).st_mode | ||||
Greg Ward
|
r15255 | return ((mode & stat.S_IXUSR) and | ||
(mode & stat.S_IXGRP) and | ||||
(mode & stat.S_IXOTH)) | ||||
various
|
r15168 | |||
def getmode(executable): | ||||
if executable: | ||||
return 0755 | ||||
else: | ||||
return 0644 | ||||
def urljoin(first, second, *arg): | ||||
def join(left, right): | ||||
if not left.endswith('/'): | ||||
left += '/' | ||||
if right.startswith('/'): | ||||
right = right[1:] | ||||
return left + right | ||||
url = join(first, second) | ||||
for a in arg: | ||||
url = join(url, a) | ||||
return url | ||||
def hexsha1(data): | ||||
"""hexsha1 returns the hex-encoded sha1 sum of the data in the file-like | ||||
object data""" | ||||
Thomas Arendsen Hein
|
r15347 | h = util.sha1() | ||
various
|
r15168 | for chunk in util.filechunkiter(data): | ||
h.update(chunk) | ||||
return h.hexdigest() | ||||
def httpsendfile(ui, filename): | ||||
Na'Tosha Bard
|
r15224 | return httpconnection.httpsendfile(ui, filename, 'rb') | ||
various
|
r15168 | |||
def unixpath(path): | ||||
Greg Ward
|
r15252 | '''Return a version of path normalized for use with the lfdirstate.''' | ||
FUJIWARA Katsunori
|
r16066 | return util.pconvert(os.path.normpath(path)) | ||
various
|
r15168 | |||
def islfilesrepo(repo): | ||||
Matt Mackall
|
r15170 | return ('largefiles' in repo.requirements and | ||
Benjamin Pollack
|
r15319 | util.any(shortname + '/' in f[0] for f in repo.store.datafiles())) | ||
various
|
r15168 | |||
Hao Lian
|
r15391 | def mkstemp(repo, prefix): | ||
'''Returns a file descriptor and a filename corresponding to a temporary | ||||
file in the repo's largefiles store.''' | ||||
path = repo.join(longname) | ||||
Matt Mackall
|
r15392 | util.makedirs(path) | ||
Hao Lian
|
r15391 | return tempfile.mkstemp(prefix=prefix, dir=path) | ||
Matt Mackall
|
r15333 | class storeprotonotcapable(Exception): | ||
various
|
r15168 | def __init__(self, storetypes): | ||
self.storetypes = storetypes | ||||
Na'Tosha Bard
|
r16103 | |||
def getcurrentheads(repo): | ||||
branches = repo.branchmap() | ||||
heads = [] | ||||
for branch in branches: | ||||
newheads = repo.branchheads(branch) | ||||
heads = heads + newheads | ||||
return heads | ||||