lfutil.py
797 lines
| 25.4 KiB
| text/x-python
|
PythonLexer
various
|
r15168 | # Copyright 2009-2010 Gregory P. Ward | ||
# Copyright 2009-2010 Intelerad Medical Systems Incorporated | ||||
# Copyright 2010-2011 Fog Creek Software | ||||
# Copyright 2010-2011 Unity Technologies | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | ||||
'''largefiles utility code: must not import other modules in this package.''' | ||||
Martin von Zweigbergk
|
r43982 | import contextlib | ||
liscju
|
r29309 | import copy | ||
various
|
r15168 | import os | ||
import stat | ||||
liscju
|
r29309 | |||
from mercurial.i18n import _ | ||||
Joerg Sonnenberger
|
r47771 | from mercurial.node import hex | ||
Gregory Szorc
|
r43355 | from mercurial.pycompat import open | ||
various
|
r15168 | |||
liscju
|
r29309 | from mercurial import ( | ||
dirstate, | ||||
Pulkit Goyal
|
r30820 | encoding, | ||
liscju
|
r29309 | error, | ||
httpconnection, | ||||
liscju
|
r29320 | match as matchmod, | ||
Pulkit Goyal
|
r30640 | pycompat, | ||
Simon Sapin
|
r48055 | requirements, | ||
liscju
|
r29309 | scmutil, | ||
Gregory Szorc
|
r33373 | sparse, | ||
liscju
|
r29309 | util, | ||
Pierre-Yves David
|
r31247 | vfs as vfsmod, | ||
liscju
|
r29309 | ) | ||
Augie Fackler
|
r44519 | from mercurial.utils import hashutil | ||
r49205 | from mercurial.dirstateutils import timestamp | |||
various
|
r15168 | |||
Augie Fackler
|
# Name of the in-tree directory that holds largefile standins, plus the
# same name with a trailing slash for cheap prefix matching.
shortname = b'.hglf'
shortnameslash = shortname + b'/'
# Name used for the largefiles store directory and the config section.
longname = b'largefiles'
various
|
r15168 | |||
# -- Private worker functions ------------------------------------------ | ||||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to ``value``.

    The previous value (``False`` when the attribute does not exist yet)
    is restored on exit, even if the body raises.
    """
    saved = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = saved
Greg Ward
|
def getminsize(ui, assumelfiles, opt, default=10):
    """Resolve the minimum size (in MB) above which a file is a largefile.

    The command line value ``opt`` wins; otherwise, when ``assumelfiles``
    is set, fall back to the ``largefiles.minsize`` config value (or
    ``default``).  Raises ``error.Abort`` for a missing or non-numeric
    value.
    """
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, b'minsize', default=default)
    if size:
        try:
            return float(size)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % size
            )
    if size is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return size
Augie Fackler
|
r43346 | |||
various
|
def link(src, dest):
    """Try to hardlink ``src`` to ``dest``; fall back to an atomic copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # Hardlinking can fail (cross-device link, unsupported FS, ...);
        # copy the content atomically and replicate the mode bits instead.
        with open(src, b'rb') as fin, util.atomictempfile(dest) as fout:
            for chunk in util.filechunkiter(fin):
                fout.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
Augie Fackler
|
r43346 | |||
Benjamin Pollack
|
def usercachepath(ui, hash):
    """Return the path of ``hash`` inside the "global" largefiles cache.

    This cache is shared between repositories, both to preserve download
    bandwidth and to save storage space.
    """
    return os.path.join(_usercachedir(ui), hash)
Mads Kiilerich
|
r28574 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    configured = ui.configpath(name, b'usercache')
    if configured:
        return configured

    env = encoding.environ
    hint = None

    if pycompat.iswindows:
        appdata = env.get(b'LOCALAPPDATA', env.get(b'APPDATA'))
        if appdata:
            return os.path.join(appdata, name)
        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = env.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)
        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        # follow the XDG base-directory convention, then fall back to ~/.cache
        xdg = env.get(b'XDG_CACHE_HOME')
        if xdg:
            return os.path.join(xdg, name)
        home = env.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)
        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Benjamin Pollack
|
def inusercache(ui, hash):
    """True when the user-level cache already contains ``hash``."""
    return os.path.exists(usercachepath(ui, hash))
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.

    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        # copy/link the file into the repo store so later lookups hit it
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Na'Tosha Bard
|
class largefilesdirstate(dirstate.dirstate):
    """A dirstate subclass that tracks largefiles.

    Every filename is normalized through unixpath() before being handed
    to the base class, so callers may pass platform-style paths.
    """

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        with lfdirstate.parentchange(repo):
            for standin in standins:
                lfile = splitstandin(standin)
                # mark possibly dirty so the next status check rehashes
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
    return lfdirstate
Augie Fackler
|
r43346 | |||
Mads Kiilerich
|
def lfdirstatestatus(lfdirstate, repo):
    """Resolve "unsure" entries of the largefiles dirstate by rehashing.

    Files whose content hash still matches the standin recorded in the
    parent revision are moved to "clean" (and their stat data is cached
    when the mtime is reliable); others are moved to "modified".
    Returns the updated status object.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # no standin in the parent: the file cannot be clean
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            # only cache stat data when the mtime is trustworthy
            # (i.e. not racing with the status walk)
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def listlfiles(repo, rev=None, matcher=None):
    """Return a list of largefiles in the working copy or the
    specified changeset."""
    if matcher is None:
        matcher = getstandinmatcher(repo)
    lfiles = []
    for f in repo[rev].walk(matcher):
        # for the working copy (rev is None), skip standins that are
        # unknown to the dirstate
        if rev is not None or repo.dirstate.get_entry(f).any_tracked:
            lfiles.append(splitstandin(f))
    return lfiles
various
|
r15168 | |||
Matt Harbison
|
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    target = storepath(repo, hash, forcelocal)
    return os.path.exists(target)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    # a shared repo keeps its primary store in the share source
    if repo.shared() and not forcelocal:
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)
    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)
    return (primary, False)
Matt Harbison
|
r24629 | |||
Augie Fackler
|
r43346 | |||
various
|
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash == hash:
        return True
    # content did not hash to what the standin promised: discard it
    repo.ui.warn(
        _(b'%s: data corruption in %s with hash %s\n')
        % (filename, path, gothash)
    )
    wvfs.unlink(filename)
    return False
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def copytostore(repo, ctx, file, fstandin):
    """Copy one largefile (named by its standin in ``ctx``) into the store."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if not wvfs.exists(file):
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
various
|
r15168 | |||
Dan Villiom Podlaski Christiansen
|
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        lfile = splitstandin(filename)
        # only standins that are still present in the manifest matter
        if lfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, lfile, filename)
Dan Villiom Podlaski Christiansen
|
r15796 | |||
Augie Fackler
|
r43346 | |||
Benjamin Pollack
|
def copytostoreabsolute(repo, file, hash):
    """Copy ``file`` (an absolute path) into the store under ``hash``."""
    if inusercache(repo.ui, hash):
        # cheap path: hardlink/copy straight from the user cache
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
        return
    dest = storepath(repo, hash)
    util.makedirs(os.path.dirname(dest))
    with open(file, b'rb') as srcf:
        with util.atomictempfile(
            dest, createmode=repo.store.createmode
        ) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
    linktousercache(repo, hash)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Benjamin Pollack
|
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate each pattern into the standin namespace
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
    else:
        # no patterns: relative to repo root
        pats = [wvfs.join(standindir)]
    return scmutil.match(repo[None], pats, badfn=badfn)
Augie Fackler
|
r43346 | |||
various
|
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # accept a standin only when the corresponding largefile matches
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
Augie Fackler
|
r43346 | |||
various
|
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def isstandin(filename):
    """Return True if ``filename`` is a big-file standin.

    ``filename`` must be in Mercurial's internal (slash-separated) form.
    """
    return filename.startswith(shortnameslash)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def splitstandin(filename):
    """Return the largefile name for a standin path, or None.

    Split on '/' because that's what dirstate always uses, even on
    Windows; local separators are normalized first in case the name
    came from an external source (like the command line).
    """
    parts = util.pconvert(filename).split(b'/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    abspath = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(abspath), getexecutable(abspath))
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    raw = fctx.data()
    return raw.strip()
Augie Fackler
|
r43346 | |||
various
|
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = b'x' if executable else b''
    repo.wwrite(standin, hash + b'\n', flags)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for block in instream:
        hasher.update(block)
        outfile.write(block)
    return hex(hasher.digest())
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def hashfile(file):
    """Return the hex SHA-1 of ``file``'s content, or b'' if it is absent."""
    if os.path.exists(file):
        with open(file, b'rb') as fh:
            return hexsha1(fh)
    return b''
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def getexecutable(filename):
    """Truthy when ``filename`` is executable by user, group AND other."""
    st_mode = os.stat(filename).st_mode
    # equivalent to the and-chain over the three execute bits: the result
    # is falsy as soon as any bit is missing
    result = st_mode & stat.S_IXUSR
    if result:
        result = st_mode & stat.S_IXGRP
    if result:
        result = st_mode & stat.S_IXOTH
    return result
various
|
r15168 | |||
def urljoin(first, second, *arg): | ||||
def join(left, right): | ||||
Augie Fackler
|
r43347 | if not left.endswith(b'/'): | ||
left += b'/' | ||||
if right.startswith(b'/'): | ||||
various
|
r15168 | right = right[1:] | ||
return left + right | ||||
url = join(first, second) | ||||
for a in arg: | ||||
url = join(url, a) | ||||
return url | ||||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    digest = hashutil.sha1()
    for block in util.filechunkiter(fileobj):
        digest.update(block)
    return hex(digest.digest())
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def httpsendfile(ui, filename):
    """Wrap ``filename`` (opened binary) for sending over HTTP."""
    return httpconnection.httpsendfile(ui, filename, b'rb')
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
various
|
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements:
        # a store file under .hglf/ proves largefiles are committed
        if any(shortnameslash in f[1] for f in repo.store.datafiles()):
            return True
    # otherwise, any entry in the largefiles dirstate counts
    return any(openlfdirstate(repo.ui, repo, False))
various
|
r15168 | |||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
class storeprotonotcapable(Exception):
    """Raised when no store class supports the requested protocol types."""

    def __init__(self, storetypes):
        # keep the attempted store types so callers can report them
        self.storetypes = storetypes
Na'Tosha Bard
|
r16103 | |||
Augie Fackler
|
r43346 | |||
Na'Tosha Bard
|
def getstandinsstate(repo):
    """Return [(lfile, hash), ...] for every standin tracked by dirstate;
    hash is None when the standin cannot be read."""
    result = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    walked = repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    )
    for fstandin in walked:
        try:
            hash = readasstandin(wctx[fstandin])
        except IOError:
            hash = None
        result.append((splitstandin(fstandin), hash))
    return result
Na'Tosha Bard
|
r16245 | |||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Copy the dirstate status of ``lfile``'s standin onto ``lfile``
    in the largefiles dirstate."""
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )
    else:
        # standin unknown to dirstate: drop the largefile too
        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
FUJIWARA Katsunori
|
r22095 | |||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def markcommitted(orig, ctx, node):
    """Wrap context.markcommitted() to sync the largefiles dirstate and
    populate the store after a commit."""
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
Augie Fackler
|
r43346 | |||
Na'Tosha Bard
|
def getlfilestoupdate(oldstandins, newstandins):
    """Return the unique largefile names whose (name, hash) pair appears
    in only one of the two standin lists."""
    changed = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for name, _hash in changed:
        if name not in filelist:
            filelist.append(name)
    return filelist
FUJIWARA Katsunori
|
r21042 | |||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def getlfilestoupload(repo, missing, addfunc):
    """For every revision in ``missing``, feed each standin that revision
    touches (and its hash) to ``addfunc``, with a progress bar."""
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            # look up the context with largefiles status disabled
            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                # for merges, also consider files that differ from either
                # parent manifest, since ctx.files() omits them
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
FUJIWARA Katsunori
|
r23185 | |||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn
    return match
FUJIWARA Katsunori
|
r23187 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class automatedcommithook:
    """One-shot hook that refreshes standins on the first resumed commit.

    While an automated committer (rebase, transplant, ...) is running,
    standins are kept up to date before each commit, so refreshing them
    again per commit would be wasted work.  The exception is the very
    first commit after resuming (e.g. ``rebase --continue``): largefiles
    may have been edited by hand in the meantime, so that one commit must
    refresh the standins.
    """

    def __init__(self, resuming):
        # True only until the first commit after resuming has been seen.
        self.resuming = resuming

    def __call__(self, repo, match):
        # Fast path: not resuming (or already handled) -- leave the
        # matcher untouched.
        if not self.resuming:
            return match
        # First commit after resuming: refresh once, then disarm so
        # subsequent commits skip the update.
        self.resuming = False
        return updatestandinsbymatch(repo, match)
FUJIWARA Katsunori
|
r23188 | |||
Augie Fackler
|
r43346 | |||
FUJIWARA Katsunori
|
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function used to emit largefiles-specific status.

    With ``forcibly`` left as ``None``, the last entry of
    ``repo._lfstatuswriters`` is returned as the "default" writer
    (falling back to a no-op when the repo has no largefiles support).

    A non-``None`` ``forcibly`` overrides that: truthy means always
    write via ``ui.status``, falsy means always discard.
    """
    if forcibly is None:
        if util.safehasattr(repo, b'_largefilesenabled'):
            return repo._lfstatuswriters[-1]
        # No largefiles support on this repo: silently discard.
        return lambda *msg, **opts: None
    if forcibly:
        # Caller demands the status always be written out.
        return ui.status
    # Caller demands the status always be ignored.
    return lambda *msg, **opts: None