largefiles: use progress helper...
Martin von Zweigbergk
r38426:164306d3 default
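This changeset converts the largefiles extension from per-iteration ui.progress() calls to the progress helper returned by ui.makeprogress(): the topic, unit, and total are supplied once when the helper is created, each loop iteration only calls update(pos), and complete() closes the topic (previously done by calling ui.progress() with a position of None). A minimal sketch of the pattern, with a hypothetical 'items' loop standing in for the real largefile loops in the diff below (the real code wraps its messages in _() for translation):

    # Before: every iteration repeats topic/unit/total, and the topic is
    # closed by a final call with pos=None.
    at = 0
    for item in items:
        ui.progress('processing', at, unit='items', total=len(items))
        at += 1
    ui.progress('processing', None)

    # After: one helper owns topic/unit/total; iterations only update the
    # position, and complete() closes the topic.
    progress = ui.makeprogress('processing', unit='items', total=len(items))
    at = 0
    for item in items:
        progress.update(at)
        at += 1
    progress.complete()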
hgext/largefiles/basestore.py
@@ -1,164 +1,165 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''base class for store implementations and store-related utility code'''
 from __future__ import absolute_import

 from mercurial.i18n import _

 from mercurial import node, util

 from . import lfutil

 class StoreError(Exception):
     '''Raised when there is a problem getting files from or putting
     files to a central store.'''
     def __init__(self, filename, hash, url, detail):
         self.filename = filename
         self.hash = hash
         self.url = url
         self.detail = detail

     def longmessage(self):
         return (_("error getting id %s from url %s for file %s: %s\n") %
                 (self.hash, util.hidepassword(self.url), self.filename,
                  self.detail))

     def __str__(self):
         return "%s: %s" % (util.hidepassword(self.url), self.detail)

 class basestore(object):
     def __init__(self, ui, repo, url):
         self.ui = ui
         self.repo = repo
         self.url = url

     def put(self, source, hash):
         '''Put source file into the store so it can be retrieved by hash.'''
         raise NotImplementedError('abstract method')

     def exists(self, hashes):
         '''Check to see if the store contains the given hashes. Given an
         iterable of hashes it returns a mapping from hash to bool.'''
         raise NotImplementedError('abstract method')

     def get(self, files):
         '''Get the specified largefiles from the store and write to local
         files under repo.root. files is a list of (filename, hash)
         tuples. Return (success, missing), lists of files successfully
         downloaded and those not found in the store. success is a list
         of (filename, hash) tuples; missing is a list of filenames that
         we could not get. (The detailed error message will already have
         been presented to the user, so missing is just supplied as a
         summary.)'''
         success = []
         missing = []
         ui = self.ui

         at = 0
         available = self.exists(set(hash for (_filename, hash) in files))
+        progress = ui.makeprogress(_('getting largefiles'), unit=_('files'),
+                                   total=len(files))
         for filename, hash in files:
-            ui.progress(_('getting largefiles'), at, unit=_('files'),
-                        total=len(files))
+            progress.update(at)
             at += 1
             ui.note(_('getting %s:%s\n') % (filename, hash))

             if not available.get(hash):
                 ui.warn(_('%s: largefile %s not available from %s\n')
                         % (filename, hash, util.hidepassword(self.url)))
                 missing.append(filename)
                 continue

             if self._gethash(filename, hash):
                 success.append((filename, hash))
             else:
                 missing.append(filename)

-        ui.progress(_('getting largefiles'), None)
+        progress.complete()
         return (success, missing)

     def _gethash(self, filename, hash):
         """Get file with the provided hash and store it in the local repo's
         store and in the usercache.
         filename is for informational messages only.
         """
         util.makedirs(lfutil.storepath(self.repo, ''))
         storefilename = lfutil.storepath(self.repo, hash)

         tmpname = storefilename + '.tmp'
         with util.atomictempfile(tmpname,
                 createmode=self.repo.store.createmode) as tmpfile:
             try:
                 gothash = self._getfile(tmpfile, filename, hash)
             except StoreError as err:
                 self.ui.warn(err.longmessage())
                 gothash = ""

         if gothash != hash:
             if gothash != "":
                 self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                              % (filename, hash, gothash))
             util.unlink(tmpname)
             return False

         util.rename(tmpname, storefilename)
         lfutil.linktousercache(self.repo, hash)
         return True

     def verify(self, revs, contents=False):
         '''Verify the existence (and, optionally, contents) of every big
         file revision referenced by every changeset in revs.
         Return 0 if all is well, non-zero on any errors.'''

         self.ui.status(_('searching %d changesets for largefiles\n') %
                        len(revs))
         verified = set()        # set of (filename, filenode) tuples
         filestocheck = []       # list of (cset, filename, expectedhash)
         for rev in revs:
             cctx = self.repo[rev]
             cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

             for standin in cctx:
                 filename = lfutil.splitstandin(standin)
                 if filename:
                     fctx = cctx[standin]
                     key = (filename, fctx.filenode())
                     if key not in verified:
                         verified.add(key)
                         expectedhash = lfutil.readasstandin(fctx)
                         filestocheck.append((cset, filename, expectedhash))

         failed = self._verifyfiles(contents, filestocheck)

         numrevs = len(verified)
         numlfiles = len(set([fname for (fname, fnode) in verified]))
         if contents:
             self.ui.status(
                 _('verified contents of %d revisions of %d largefiles\n')
                 % (numrevs, numlfiles))
         else:
             self.ui.status(
                 _('verified existence of %d revisions of %d largefiles\n')
                 % (numrevs, numlfiles))
         return int(failed)

     def _getfile(self, tmpfile, filename, hash):
         '''Fetch one revision of one file from the store and write it
         to tmpfile. Compute the hash of the file on-the-fly as it
         downloads and return the hash. Close tmpfile. Raise
         StoreError if unable to download the file (e.g. it does not
         exist in the store).'''
         raise NotImplementedError('abstract method')

     def _verifyfiles(self, contents, filestocheck):
         '''Perform the actual verification of files in the store.
         'contents' controls verification of content hash.
         'filestocheck' is list of files to check.
         Returns _true_ if any problems are found!
         '''
         raise NotImplementedError('abstract method')
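For reference, the store API that get() above belongs to, as described by its own docstrings: exists() maps hash to bool, and get() returns a (success, missing) pair. A hypothetical caller sketch, where 'store' is any concrete basestore subclass and the hash value is a placeholder, not a real digest:

    # wanted is a list of (filename, hash) pairs, per get()'s docstring.
    somehash = '0123456789abcdef0123456789abcdef01234567'  # placeholder SHA-1
    wanted = [('data/big.bin', somehash)]
    present = store.exists(set(h for (_f, h) in wanted))  # {hash: bool}
    success, missing = store.get(wanted)
    # success holds the (filename, hash) pairs written under repo.root;
    # missing holds the filenames that could not be retrieved.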
hgext/largefiles/lfcommands.py
@@ -1,604 +1,607 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''High-level command function for lfconvert, plus the cmdtable.'''
 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import shutil

 from mercurial.i18n import _

 from mercurial import (
     cmdutil,
     context,
     error,
     hg,
     lock,
     match as matchmod,
     node,
     pycompat,
     registrar,
     scmutil,
     util,
 )

 from ..convert import (
     convcmd,
     filemap,
 )

 from . import (
     lfutil,
     storefactory
 )

 release = lock.release

 # -- Commands ----------------------------------------------------------

 cmdtable = {}
 command = registrar.command(cmdtable)

 @command('lfconvert',
     [('s', 'size', '',
       _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
     ('', 'to-normal', False,
      _('convert from a largefiles repo to a normal repo')),
     ],
     _('hg lfconvert SOURCE DEST [FILE ...]'),
     norepo=True,
     inferrepo=True)
 def lfconvert(ui, src, dest, *pats, **opts):
     '''convert a normal repository to a largefiles repository

     Convert repository SOURCE to a new repository DEST, identical to
     SOURCE except that certain files will be converted as largefiles:
     specifically, any file that matches any PATTERN *or* whose size is
     above the minimum size threshold is converted as a largefile. The
     size used to determine whether or not to track a file as a
     largefile is the size of the first version of the file. The
     minimum size can be specified either with --size or in
     configuration as ``largefiles.size``.

     After running this command you will need to make sure that
     largefiles is enabled anywhere you intend to push the new
     repository.

     Use --to-normal to convert largefiles back to normal files; after
     this, the DEST repository can be used without largefiles at all.'''

     opts = pycompat.byteskwargs(opts)
     if opts['to_normal']:
         tolfile = False
     else:
         tolfile = True
         size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

     if not hg.islocal(src):
         raise error.Abort(_('%s is not a local Mercurial repo') % src)
     if not hg.islocal(dest):
         raise error.Abort(_('%s is not a local Mercurial repo') % dest)

     rsrc = hg.repository(ui, src)
     ui.status(_('initializing destination %s\n') % dest)
     rdst = hg.repository(ui, dest, create=True)

     success = False
     dstwlock = dstlock = None
     try:
         # Get a list of all changesets in the source.  The easy way to do this
         # is to simply walk the changelog, using changelog.nodesbetween().
         # Take a look at mercurial/revlog.py:639 for more details.
         # Use a generator instead of a list to decrease memory usage
         ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
             rsrc.heads())[0])
         revmap = {node.nullid: node.nullid}
         if tolfile:
             # Lock destination to prevent modification while it is converted to.
             # Don't need to lock src because we are just reading from its
             # history which can't change.
             dstwlock = rdst.wlock()
             dstlock = rdst.lock()

             lfiles = set()
             normalfiles = set()
             if not pats:
                 pats = ui.configlist(lfutil.longname, 'patterns')
             if pats:
                 matcher = matchmod.match(rsrc.root, '', list(pats))
             else:
                 matcher = None

             lfiletohash = {}
+            progress = ui.makeprogress(_('converting revisions'),
+                                       unit=_('revisions'),
+                                       total=rsrc['tip'].rev())
             for ctx in ctxs:
-                ui.progress(_('converting revisions'), ctx.rev(),
-                    unit=_('revisions'), total=rsrc['tip'].rev())
+                progress.update(ctx.rev())
                 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                     lfiles, normalfiles, matcher, size, lfiletohash)
-            ui.progress(_('converting revisions'), None)
+            progress.complete()

             if rdst.wvfs.exists(lfutil.shortname):
                 rdst.wvfs.rmtree(lfutil.shortname)

             for f in lfiletohash.keys():
                 if rdst.wvfs.isfile(f):
                     rdst.wvfs.unlink(f)
                 try:
                     rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                 except OSError:
                     pass

             # If there were any files converted to largefiles, add largefiles
             # to the destination repository's requirements.
             if lfiles:
                 rdst.requirements.add('largefiles')
                 rdst._writerequirements()
         else:
             class lfsource(filemap.filemap_source):
                 def __init__(self, ui, source):
                     super(lfsource, self).__init__(ui, source, None)
                     self.filemapper.rename[lfutil.shortname] = '.'

                 def getfile(self, name, rev):
                     realname, realrev = rev
                     f = super(lfsource, self).getfile(name, rev)

                     if (not realname.startswith(lfutil.shortnameslash)
                             or f[0] is None):
                         return f

                     # Substitute in the largefile data for the hash
                     hash = f[0].strip()
                     path = lfutil.findfile(rsrc, hash)

                     if path is None:
                         raise error.Abort(_("missing largefile for '%s' in %s")
                                           % (realname, realrev))
                     return util.readfile(path), f[1]

             class converter(convcmd.converter):
                 def __init__(self, ui, source, dest, revmapfile, opts):
                     src = lfsource(ui, source)

                     super(converter, self).__init__(ui, src, dest, revmapfile,
                                                     opts)

             found, missing = downloadlfiles(ui, rsrc)
             if missing != 0:
                 raise error.Abort(_("all largefiles must be present locally"))

             orig = convcmd.converter
             convcmd.converter = converter

             try:
                 convcmd.convert(ui, src, dest, source_type='hg', dest_type='hg')
             finally:
                 convcmd.converter = orig
         success = True
     finally:
         if tolfile:
             rdst.dirstate.clear()
             release(dstlock, dstwlock)
         if not success:
             # we failed, remove the new directory
             shutil.rmtree(rdst.root)

 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
         matcher, size, lfiletohash):
     # Convert src parents to dst parents
     parents = _convertparents(ctx, revmap)

     # Generate list of changed files
     files = _getchangedfiles(ctx, parents)

     dstfiles = []
     for f in files:
         if f not in lfiles and f not in normalfiles:
             islfile = _islfile(f, ctx, matcher, size)
             # If this file was renamed or copied then copy
             # the largefile-ness of its predecessor
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
                 renamed = fctx.renamed()
                 renamedlfile = renamed and renamed[0] in lfiles
                 islfile |= renamedlfile
                 if 'l' in fctx.flags():
                     if renamedlfile:
                         raise error.Abort(
                             _('renamed/copied largefile %s becomes symlink')
                             % f)
                     islfile = False
             if islfile:
                 lfiles.add(f)
             else:
                 normalfiles.add(f)

         if f in lfiles:
             fstandin = lfutil.standin(f)
             dstfiles.append(fstandin)
             # largefile in manifest if it has not been removed/renamed
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
                 if 'l' in fctx.flags():
                     renamed = fctx.renamed()
                     if renamed and renamed[0] in lfiles:
                         raise error.Abort(_('largefile %s becomes symlink') % f)

                 # largefile was modified, update standins
                 m = hashlib.sha1('')
                 m.update(ctx[f].data())
                 hash = m.hexdigest()
                 if f not in lfiletohash or lfiletohash[f] != hash:
                     rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                     executable = 'x' in ctx[f].flags()
                     lfutil.writestandin(rdst, fstandin, hash,
                         executable)
                     lfiletohash[f] = hash
         else:
             # normal file
             dstfiles.append(f)

     def getfilectx(repo, memctx, f):
         srcfname = lfutil.splitstandin(f)
         if srcfname is not None:
             # if the file isn't in the manifest then it was removed
             # or renamed, return None to indicate this
             try:
                 fctx = ctx.filectx(srcfname)
             except error.LookupError:
                 return None
             renamed = fctx.renamed()
             if renamed:
                 # standin is always a largefile because largefile-ness
                 # doesn't change after rename or copy
                 renamed = lfutil.standin(renamed[0])

             return context.memfilectx(repo, memctx, f,
                                       lfiletohash[srcfname] + '\n',
                                       'l' in fctx.flags(), 'x' in fctx.flags(),
                                       renamed)
         else:
             return _getnormalcontext(repo, ctx, f, revmap)

     # Commit
     _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)

 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
     mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                           getfilectx, ctx.user(), ctx.date(), ctx.extra())
     ret = rdst.commitctx(mctx)
     lfutil.copyalltostore(rdst, ret)
     rdst.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()

 # Generate list of changed files
 def _getchangedfiles(ctx, parents):
     files = set(ctx.files())
     if node.nullid not in parents:
         mc = ctx.manifest()
         mp1 = ctx.parents()[0].manifest()
         mp2 = ctx.parents()[1].manifest()
         files |= (set(mp1) | set(mp2)) - set(mc)
         for f in mc:
             if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                 files.add(f)
     return files

 # Convert src parents to dst parents
 def _convertparents(ctx, revmap):
     parents = []
     for p in ctx.parents():
         parents.append(revmap[p.node()])
     while len(parents) < 2:
         parents.append(node.nullid)
     return parents

 # Get memfilectx for a normal file
 def _getnormalcontext(repo, ctx, f, revmap):
     try:
         fctx = ctx.filectx(f)
     except error.LookupError:
         return None
     renamed = fctx.renamed()
     if renamed:
         renamed = renamed[0]

     data = fctx.data()
     if f == '.hgtags':
         data = _converttags(repo.ui, revmap, data)
     return context.memfilectx(repo, ctx, f, data, 'l' in fctx.flags(),
                               'x' in fctx.flags(), renamed)

 # Remap tag data using a revision map
 def _converttags(ui, revmap, data):
     newdata = []
     for line in data.splitlines():
         try:
             id, name = line.split(' ', 1)
         except ValueError:
             ui.warn(_('skipping incorrectly formatted tag %s\n')
                 % line)
             continue
         try:
             newid = node.bin(id)
         except TypeError:
             ui.warn(_('skipping incorrectly formatted id %s\n')
                 % id)
             continue
         try:
             newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                 name))
         except KeyError:
             ui.warn(_('no mapping for id %s\n') % id)
             continue
     return ''.join(newdata)

 def _islfile(file, ctx, matcher, size):
     '''Return true if file should be considered a largefile, i.e.
     matcher matches it or it is larger than size.'''
     # never store special .hg* files as largefiles
     if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
         return False
     if matcher and matcher(file):
         return True
     try:
         return ctx.filectx(file).size() >= size * 1024 * 1024
     except error.LookupError:
         return False

 def uploadlfiles(ui, rsrc, rdst, files):
     '''upload largefiles to the central store'''

     if not files:
         return

     store = storefactory.openstore(rsrc, rdst, put=True)

     at = 0
     ui.debug("sending statlfile command for %d largefiles\n" % len(files))
     retval = store.exists(files)
     files = [h for h in files if not retval[h]]
     ui.debug("%d largefiles need to be uploaded\n" % len(files))

+    progress = ui.makeprogress(_('uploading largefiles'), unit=_('files'),
+                               total=len(files))
     for hash in files:
-        ui.progress(_('uploading largefiles'), at, unit=_('files'),
-                    total=len(files))
+        progress.update(at)
         source = lfutil.findfile(rsrc, hash)
         if not source:
             raise error.Abort(_('largefile %s missing from store'
                                 ' (needs to be uploaded)') % hash)
         # XXX check for errors here
         store.put(source, hash)
         at += 1
-    ui.progress(_('uploading largefiles'), None)
+    progress.complete()

 def verifylfiles(ui, repo, all=False, contents=False):
     '''Verify that every largefile revision in the current changeset
     exists in the central store.  With --contents, also verify that
     the contents of each local largefile file revision are correct (SHA-1 hash
     matches the revision ID).  With --all, check every changeset in
     this repository.'''
     if all:
         revs = repo.revs('all()')
     else:
         revs = ['.']

     store = storefactory.openstore(repo)
     return store.verify(revs, contents=contents)

 def cachelfiles(ui, repo, node, filelist=None):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.

     returns a tuple (cached, missing).  cached is the list of files downloaded
     by this operation; missing is the list of files that were needed but could
     not be found.'''
     lfiles = lfutil.listlfiles(repo, node)
     if filelist:
         lfiles = set(lfiles) & set(filelist)
     toget = []

     ctx = repo[node]
     for lfile in lfiles:
         try:
             expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
         except IOError as err:
             if err.errno == errno.ENOENT:
                 continue # node must be None and standin wasn't found in wctx
             raise
         if not lfutil.findfile(repo, expectedhash):
             toget.append((lfile, expectedhash))

     if toget:
         store = storefactory.openstore(repo)
         ret = store.get(toget)
         return ret

     return ([], [])

 def downloadlfiles(ui, repo, rev=None):
     match = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})
     def prepare(ctx, fns):
         pass
     totalsuccess = 0
     totalmissing = 0
     if rev != []: # walkchangerevs on empty list would return all revs
         for ctx in cmdutil.walkchangerevs(repo, match, {'rev' : rev},
                                           prepare):
             success, missing = cachelfiles(ui, repo, ctx.node())
             totalsuccess += len(success)
             totalmissing += len(missing)
     ui.status(_("%d additional largefiles cached\n") % totalsuccess)
     if totalmissing > 0:
         ui.status(_("%d largefiles failed to download\n") % totalmissing)
     return totalsuccess, totalmissing

 def updatelfiles(ui, repo, filelist=None, printmessage=None,
                  normallookup=False):
     '''Update largefiles according to standins in the working directory

     If ``printmessage`` is other than ``None``, it means "print (or
     ignore, for false) message forcibly".
     '''
     statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
     with repo.wlock():
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

         if filelist is not None:
             filelist = set(filelist)
             lfiles = [f for f in lfiles if f in filelist]

         update = {}
         dropped = set()
         updated, removed = 0, 0
         wvfs = repo.wvfs
         wctx = repo[None]
         for lfile in lfiles:
             rellfile = lfile
             rellfileorig = os.path.relpath(
                 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                 start=repo.root)
             relstandin = lfutil.standin(lfile)
             relstandinorig = os.path.relpath(
                 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                 start=repo.root)
             if wvfs.exists(relstandin):
                 if (wvfs.exists(relstandinorig) and
                     wvfs.exists(rellfile)):
                     shutil.copyfile(wvfs.join(rellfile),
                                     wvfs.join(rellfileorig))
                     wvfs.unlinkpath(relstandinorig)
                 expecthash = lfutil.readasstandin(wctx[relstandin])
                 if expecthash != '':
                     if lfile not in wctx: # not switched to normal file
                         if repo.dirstate[relstandin] != '?':
                             wvfs.unlinkpath(rellfile, ignoremissing=True)
                         else:
                             dropped.add(rellfile)

                     # use normallookup() to allocate an entry in largefiles
                     # dirstate to prevent lfilesrepo.status() from reporting
                     # missing files as removed.
                     lfdirstate.normallookup(lfile)
                     update[lfile] = expecthash
             else:
                 # Remove lfiles for which the standin is deleted, unless the
                 # lfile is added to the repository again. This happens when a
                 # largefile is converted back to a normal file: the standin
                 # disappears, but a new (normal) file appears as the lfile.
                 if (wvfs.exists(rellfile) and
                     repo.dirstate.normalize(lfile) not in wctx):
                     wvfs.unlinkpath(rellfile)
                     removed += 1

         # largefile processing might be slow and be interrupted - be prepared
         lfdirstate.write()

         if lfiles:
             lfiles = [f for f in lfiles if f not in dropped]

             for f in dropped:
                 repo.wvfs.unlinkpath(lfutil.standin(f))

                 # This needs to happen for dropped files, otherwise they stay in
                 # the M state.
                 lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)

             statuswriter(_('getting changed largefiles\n'))
             cachelfiles(ui, repo, None, lfiles)

         for lfile in lfiles:
             update1 = 0

             expecthash = update.get(lfile)
             if expecthash:
                 if not lfutil.copyfromcache(repo, expecthash, lfile):
                     # failed ... but already removed and set to normallookup
                     continue
                 # Synchronize largefile dirstate to the last modified
                 # time of the file
                 lfdirstate.normal(lfile)
                 update1 = 1

             # copy the exec mode of largefile standin from the repository's
             # dirstate to its state in the lfdirstate.
             rellfile = lfile
             relstandin = lfutil.standin(lfile)
             if wvfs.exists(relstandin):
                 # exec is decided by the users permissions using mask 0o100
                 standinexec = wvfs.stat(relstandin).st_mode & 0o100
                 st = wvfs.stat(rellfile)
                 mode = st.st_mode
                 if standinexec != mode & 0o100:
                     # first remove all X bits, then shift all R bits to X
                     mode &= ~0o111
                     if standinexec:
                         mode |= (mode >> 2) & 0o111 & ~util.umask
                     wvfs.chmod(rellfile, mode)
                     update1 = 1

             updated += update1

             lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

         lfdirstate.write()
         if lfiles:
             statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                          removed))

 @command('lfpull',
     [('r', 'rev', [], _('pull largefiles for these revisions'))
     ] + cmdutil.remoteopts,
     _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
 def lfpull(ui, repo, source="default", **opts):
     """pull largefiles for the specified revisions from the specified source

     Pull largefiles that are referenced from local changesets but missing
     locally, pulling from a remote repository to the local cache.

     If SOURCE is omitted, the 'default' path will be used.
     See :hg:`help urls` for more information.

     .. container:: verbose

       Some examples:

       - pull largefiles for all branch heads::

           hg lfpull -r "head() and not closed()"

       - pull largefiles on the default branch::

           hg lfpull -r "branch(default)"
     """
     repo.lfpullsource = source

     revs = opts.get(r'rev', [])
     if not revs:
         raise error.Abort(_('no revisions specified'))
     revs = scmutil.revrange(repo, revs)

     numcached = 0
     for rev in revs:
         ui.note(_('pulling largefiles for revision %d\n') % rev)
         (cached, missing) = cachelfiles(ui, repo, rev)
         numcached += len(cached)
     ui.status(_("%d largefiles cached\n") % numcached)

 @command('debuglfput',
     [] + cmdutil.remoteopts,
     _('FILE'))
 def debuglfput(ui, repo, filepath, **kwargs):
     hash = lfutil.hashfile(filepath)
     storefactory.openstore(repo).put(filepath, hash)
     ui.write('%s\n' % hash)
     return 0
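The store consumers above share one contract, stated in their docstrings: cachelfiles() returns a (cached, missing) pair of lists, and downloadlfiles() sums those counts across revisions. A hypothetical call sketch, assuming 'ui' and 'repo' objects are already in hand:

    # Ensure the largefiles of the working directory's parent are cached,
    # then report what was fetched versus what could not be found.
    cached, missing = cachelfiles(ui, repo, repo['.'].node())
    ui.status('%d fetched, %d missing\n' % (len(cached), len(missing)))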
@@ -1,674 +1,675 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import hex
18 from mercurial.node import hex
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33
33
34 shortname = '.hglf'
34 shortname = '.hglf'
35 shortnameslash = shortname + '/'
35 shortnameslash = shortname + '/'
36 longname = 'largefiles'
36 longname = 'largefiles'
37
37
38 # -- Private worker functions ------------------------------------------
38 # -- Private worker functions ------------------------------------------
39
39
40 def getminsize(ui, assumelfiles, opt, default=10):
40 def getminsize(ui, assumelfiles, opt, default=10):
41 lfsize = opt
41 lfsize = opt
42 if not lfsize and assumelfiles:
42 if not lfsize and assumelfiles:
43 lfsize = ui.config(longname, 'minsize', default=default)
43 lfsize = ui.config(longname, 'minsize', default=default)
44 if lfsize:
44 if lfsize:
45 try:
45 try:
46 lfsize = float(lfsize)
46 lfsize = float(lfsize)
47 except ValueError:
47 except ValueError:
48 raise error.Abort(_('largefiles: size must be number (not %s)\n')
48 raise error.Abort(_('largefiles: size must be number (not %s)\n')
49 % lfsize)
49 % lfsize)
50 if lfsize is None:
50 if lfsize is None:
51 raise error.Abort(_('minimum size for largefiles must be specified'))
51 raise error.Abort(_('minimum size for largefiles must be specified'))
52 return lfsize
52 return lfsize
53
53
54 def link(src, dest):
54 def link(src, dest):
55 """Try to create hardlink - if that fails, efficiently make a copy."""
55 """Try to create hardlink - if that fails, efficiently make a copy."""
56 util.makedirs(os.path.dirname(dest))
56 util.makedirs(os.path.dirname(dest))
57 try:
57 try:
58 util.oslink(src, dest)
58 util.oslink(src, dest)
59 except OSError:
59 except OSError:
60 # if hardlinks fail, fallback on atomic copy
60 # if hardlinks fail, fallback on atomic copy
61 with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
61 with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
62 for chunk in util.filechunkiter(srcf):
62 for chunk in util.filechunkiter(srcf):
63 dstf.write(chunk)
63 dstf.write(chunk)
64 os.chmod(dest, os.stat(src).st_mode)
64 os.chmod(dest, os.stat(src).st_mode)
65
65
66 def usercachepath(ui, hash):
66 def usercachepath(ui, hash):
67 '''Return the correct location in the "global" largefiles cache for a file
67 '''Return the correct location in the "global" largefiles cache for a file
68 with the given hash.
68 with the given hash.
69 This cache is used for sharing of largefiles across repositories - both
69 This cache is used for sharing of largefiles across repositories - both
70 to preserve download bandwidth and storage space.'''
70 to preserve download bandwidth and storage space.'''
71 return os.path.join(_usercachedir(ui), hash)
71 return os.path.join(_usercachedir(ui), hash)
72
72
73 def _usercachedir(ui, name=longname):
73 def _usercachedir(ui, name=longname):
74 '''Return the location of the "global" largefiles cache.'''
74 '''Return the location of the "global" largefiles cache.'''
75 path = ui.configpath(name, 'usercache')
75 path = ui.configpath(name, 'usercache')
76 if path:
76 if path:
77 return path
77 return path
78 if pycompat.iswindows:
78 if pycompat.iswindows:
79 appdata = encoding.environ.get('LOCALAPPDATA',\
79 appdata = encoding.environ.get('LOCALAPPDATA',\
80 encoding.environ.get('APPDATA'))
80 encoding.environ.get('APPDATA'))
81 if appdata:
81 if appdata:
82 return os.path.join(appdata, name)
82 return os.path.join(appdata, name)
83 elif pycompat.isdarwin:
83 elif pycompat.isdarwin:
84 home = encoding.environ.get('HOME')
84 home = encoding.environ.get('HOME')
85 if home:
85 if home:
86 return os.path.join(home, 'Library', 'Caches', name)
86 return os.path.join(home, 'Library', 'Caches', name)
87 elif pycompat.isposix:
87 elif pycompat.isposix:
88 path = encoding.environ.get('XDG_CACHE_HOME')
88 path = encoding.environ.get('XDG_CACHE_HOME')
89 if path:
89 if path:
90 return os.path.join(path, name)
90 return os.path.join(path, name)
91 home = encoding.environ.get('HOME')
91 home = encoding.environ.get('HOME')
92 if home:
92 if home:
93 return os.path.join(home, '.cache', name)
93 return os.path.join(home, '.cache', name)
94 else:
94 else:
95 raise error.Abort(_('unknown operating system: %s\n')
95 raise error.Abort(_('unknown operating system: %s\n')
96 % pycompat.osname)
96 % pycompat.osname)
97 raise error.Abort(_('unknown %s usercache location') % name)
97 raise error.Abort(_('unknown %s usercache location') % name)
98
98
99 def inusercache(ui, hash):
99 def inusercache(ui, hash):
100 path = usercachepath(ui, hash)
100 path = usercachepath(ui, hash)
101 return os.path.exists(path)
101 return os.path.exists(path)
102
102
103 def findfile(repo, hash):
103 def findfile(repo, hash):
104 '''Return store path of the largefile with the specified hash.
104 '''Return store path of the largefile with the specified hash.
105 As a side effect, the file might be linked from user cache.
105 As a side effect, the file might be linked from user cache.
106 Return None if the file can't be found locally.'''
106 Return None if the file can't be found locally.'''
107 path, exists = findstorepath(repo, hash)
107 path, exists = findstorepath(repo, hash)
108 if exists:
108 if exists:
109 repo.ui.note(_('found %s in store\n') % hash)
109 repo.ui.note(_('found %s in store\n') % hash)
110 return path
110 return path
111 elif inusercache(repo.ui, hash):
111 elif inusercache(repo.ui, hash):
112 repo.ui.note(_('found %s in system cache\n') % hash)
112 repo.ui.note(_('found %s in system cache\n') % hash)
113 path = storepath(repo, hash)
113 path = storepath(repo, hash)
114 link(usercachepath(repo.ui, hash), path)
114 link(usercachepath(repo.ui, hash), path)
115 return path
115 return path
116 return None
116 return None
117
117
class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)

def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate,
                                    lambda: sparse.matcher(repo))

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                      ignored=False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

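# Minimal usage sketch (illustrative; assumes a ui and an already-opened
# repo whose working copy tracks some largefiles):
#
#   lfdirstate = openlfdirstate(ui, repo)
#   for lfile in sorted(lfdirstate):
#       ui.write('%s %s\n' % (lfdirstate[lfile], lfile))
#   lfdirstate.write()
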
def lfdirstatestatus(lfdirstate, repo):
    pctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, subrepos=[], ignored=False,
                                  clean=False, unknown=False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))

def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)

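# Example (illustrative): in a repository created with 'hg share', the
# primary store lives in the share source, so for some hash h:
#   storepath(repo, h)                   -> <sharesource>/.hg/largefiles/<h>
#   storepath(repo, h, forcelocal=True)  -> <repo>/.hg/largefiles/<h>
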
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should never happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(
            util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))

def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)

def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)

def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher

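# Example (illustrative): if rmatcher matches only 'big.bin', the composed
# matcher accepts its standin and nothing else:
#   m = composestandinmatcher(repo, rmatcher)
#   m('.hglf/big.bin')    -> True
#   m('big.bin')          -> False (not a standin path)
#   m('.hglf/other.bin')  -> False (rmatcher does not match 'other.bin')
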
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

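# Round-trip example (illustrative, assuming the usual '.hglf' standin
# directory):
#   standin('dir/big.bin')             -> '.hglf/dir/big.bin'
#   splitstandin('.hglf/dir/big.bin')  -> 'dir/big.bin'
#   splitstandin('dir/big.bin')        -> None (not under the standin dir)
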
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)

def readasstandin(fctx):
    '''read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored in the storage layer.'''
    return fctx.data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = hashlib.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())

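# Usage sketch (hypothetical paths, for illustration): copy a file in
# chunks while hashing it in a single pass:
#
#   with open('/tmp/src', 'rb') as srcf, open('/tmp/dst', 'wb') as dstf:
#       gothash = copyandhash(util.filechunkiter(srcf), dstf)
#   # gothash is the hex SHA-1 of all bytes written to dstf
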
def hashfile(file):
    if not os.path.exists(file):
        return ''
    with open(file, 'rb') as fd:
        return hexsha1(fd)

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

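# Examples (illustrative):
#   urljoin('http://host/', '/store', 'file')  -> 'http://host/store/file'
#   urljoin('http://host', 'store/')           -> 'http://host/store/'
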
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded SHA-1 sum of the data in the
    file-like object fileobj"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
            any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                      ignored=False):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
                not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)

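# Summary of the standin-state -> lfdirstate action mapping above
# (descriptive note, not part of the original module):
#   'n' (normal)   -> normal() or normallookup(), depending on mtime validity
#   'm' (merged)   -> normallookup()
#   'r' (removed)  -> remove()
#   'a' (added)    -> add()
#   '?' (unknown)  -> drop()
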
def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup in copyalltostore(), but omits a redundant check for files
    # coming from the 2nd parent, which should already exist in the store
    # at merge time.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

def getlfilestoupload(repo, missing, addfunc):
    progress = repo.ui.makeprogress(_('finding outgoing largefiles'),
                                    unit=_('revisions'), total=len(missing))
    for i, n in enumerate(missing):
        progress.update(i)
        parents = [p for p in repo[n].parents() if p != node.nullid]

        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, readasstandin(ctx[fn]))
    progress.complete()

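# The progress-helper pattern adopted above, as a self-contained sketch
# (illustrative; the topic and unit strings are placeholders):
#
#   progress = ui.makeprogress(_('scanning'), unit=_('revisions'), total=n)
#   for i, item in enumerate(items):
#       progress.update(i)
#       ...                  # process item
#   progress.complete()
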
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns a (possibly modified) ``match`` object to be used for
    the subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False,
                                      clean=False, unknown=False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False,
                                  ignored=False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should already be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles-specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as the "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
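
# Usage sketch (illustrative): temporarily force status output on,
# regardless of the writers registered on the repo:
#
#   statuswriter = getstatuswriter(ui, repo, forcibly=True)
#   statuswriter(_('some largefiles status message\n'))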