##// END OF EJS Templates
largefiles: move basestore._openstore into new module to remove cycle
liscju -
r29305:814076f4 default
parent child Browse files
Show More
@@ -1,226 +1,164
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''base class for store implementations and store-related utility code'''
9 '''base class for store implementations and store-related utility code'''
10
10
11 import re
11 from mercurial import util, node
12
13 from mercurial import util, node, hg, error
14 from mercurial.i18n import _
12 from mercurial.i18n import _
15
13
16 import lfutil
14 import lfutil
17
15
class StoreError(Exception):
    '''Raised when there is a problem getting files from or putting
    files to a central store.

    Carries enough context (filename, hash, url, detail) for callers
    to produce both a short and a long user-facing message.
    '''

    def __init__(self, filename, hash, url, detail):
        self.filename = filename
        self.hash = hash
        self.url = url
        self.detail = detail

    def longmessage(self):
        # Full one-line report used when warning the user about a
        # failed largefile download; the password in the URL is masked.
        return (_("error getting id %s from url %s for file %s: %s\n") %
                (self.hash, util.hidepassword(self.url), self.filename,
                 self.detail))

    def __str__(self):
        # Short form: "<masked url>: <detail>"
        return "%s: %s" % (util.hidepassword(self.url), self.detail)
34
32
class basestore(object):
    '''Abstract base class for largefile stores.

    Concrete subclasses must implement put(), exists(), _getfile()
    and _verifyfiles(); this class provides the shared download
    (get/_gethash) and verification (verify) machinery on top of them.
    '''

    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store so it can be retrieved by hash.'''
        raise NotImplementedError('abstract method')

    def exists(self, hashes):
        '''Check to see if the store contains the given hashes. Given an
        iterable of hashes it returns a mapping from hash to bool.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfully
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        # Ask the store about all hashes up front so availability is a
        # single (possibly remote) query rather than one per file.
        available = self.exists(set(hash for (_filename, hash) in files))
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit=_('files'),
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(_('%s: largefile %s not available from %s\n')
                        % (filename, hash, util.hidepassword(self.url)))
                missing.append(filename)
                continue

            if self._gethash(filename, hash):
                success.append((filename, hash))
            else:
                missing.append(filename)

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        Returns True on success, False (after warning) on download
        failure or hash mismatch.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        # Download into a temp file first so a partial/corrupt fetch
        # never lands at the final store path.
        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(tmpname,
                                      createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""
        tmpfile.close()

        if gothash != hash:
            # Empty gothash means the download itself failed (already
            # reported); anything else is data corruption.
            if gothash != "":
                self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''

        self.ui.status(_('searching %d changesets for largefiles\n') %
                       len(revs))
        verified = set()        # set of (filename, filenode) tuples
        filestocheck = []       # list of (cset, filename, expectedhash)
        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            for standin in cctx:
                filename = lfutil.splitstandin(standin)
                if filename:
                    fctx = cctx[standin]
                    key = (filename, fctx.filenode())
                    if key not in verified:
                        verified.add(key)
                        # The standin's content is the largefile hash
                        # (first 40 chars).
                        expectedhash = fctx.data()[0:40]
                        filestocheck.append((cset, filename, expectedhash))

        failed = self._verifyfiles(contents, filestocheck)

        numrevs = len(verified)
        numlfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            self.ui.status(
                _('verified contents of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        else:
            self.ui.status(
                _('verified existence of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile. Compute the hash of the file on-the-fly as it
        downloads and return the hash. Close tmpfile. Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfiles(self, contents, filestocheck):
        '''Perform the actual verification of files in the store.
        'contents' controls verification of content hash.
        'filestocheck' is list of files to check.
        Returns _true_ if any problems are found!
        '''
        raise NotImplementedError('abstract method')
167
import localstore, wirestore

# Map URL scheme -> ordered list of candidate store classes; _openstore
# tries each in turn until one accepts the remote.
_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
}

# Extracts the scheme from a URL-like path, e.g. 'http' from 'http://host'.
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
178
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
    '''Return a largefile store object for repo.

    If remote is not given, it is derived from the repo's configured
    pull source (lfpullsource / default-push / default); 'put'
    selects the push path over the pull path.  Raises error.Abort for
    unsupported URL schemes or when no provider accepts the remote.
    '''
    ui = repo.ui

    if not remote:
        lfpullsource = getattr(repo, 'lfpullsource', None)
        if lfpullsource:
            path = ui.expandpath(lfpullsource)
        elif put:
            path = ui.expandpath('default-push', 'default')
        else:
            path = ui.expandpath('default')

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fallback to the empty string,
        # meaning the current directory.
        if path == 'default-push' or path == 'default':
            path = ''
            remote = repo
        else:
            path, _branches = hg.parseurl(path)
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if not match: # regular filesystem path
        scheme = 'file'
    else:
        scheme = match.group(1)

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise error.Abort(_('unsupported URL scheme %r') % scheme)

    for classobj in storeproviders:
        try:
            return classobj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            # This provider cannot talk to the remote; try the next one.
            pass

    raise error.Abort(_('%s does not appear to be a largefile store') %
                      util.hidepassword(path))
@@ -1,550 +1,550
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os, errno
11 import os, errno
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error, \
14 from mercurial import util, match as match_, hg, node, context, error, \
15 cmdutil, scmutil, commands
15 cmdutil, scmutil, commands
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.lock import release
17 from mercurial.lock import release
18
18
19 from hgext.convert import convcmd
19 from hgext.convert import convcmd
20 from hgext.convert import filemap
20 from hgext.convert import filemap
21
21
22 import lfutil
22 import lfutil
23 import basestore
23 import storefactory
24
24
# -- Commands ----------------------------------------------------------

# Command table populated by the @command decorator below; Mercurial
# picks it up by the conventional 'cmdtable' name.
cmdtable = {}
command = cmdutil.command(cmdtable)
29
29
@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source.  The easy way to do
        # this is to simply walk the changelog, using
        # changelog.nodesbetween().  Take a look at mercurial/revlog.py:639
        # for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted
            # to.  Don't need to lock src because we are just reading from
            # its history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revisions'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # Remove the working-copy largefile payloads; the converted
            # repo tracks standins instead.
            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    # Map the .hglf/ standin directory back to the root.
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            # Temporarily swap in our largefile-aware converter, restoring
            # the original even if the conversion fails.
            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
172
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Convert one source changeset ctx into rdst.

    Classifies each changed file as largefile or normal (updating the
    lfiles/normalfiles sets and the lfiletohash cache in place), writes
    standins for largefiles, and commits the result, recording the new
    node in revmap.
    '''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # Symlinks can never be largefiles.
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink')
                                          % f)

                # largefile was modified, update standins
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        # Supply file content for the memctx commit below.
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
250
250
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit dstfiles into rdst as a memctx mirroring ctx's metadata,
    copy any largefiles into the store, and record the new tip node in
    revmap under the source node.'''
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
258
258
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files changed in ctx.

    For merges (no null parent), also include files that differ
    between the merge result and either parent, since ctx.files()
    alone does not cover them.
    '''
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        # Files present in a parent but absent from the merge result.
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)
    return files
271
271
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map ctx's parent nodes through revmap, padding with nullid so the
    result always has exactly two entries (as memctx requires).'''
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents
280
280
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    '''Return a memfilectx for non-largefile f in ctx, or None if the
    file was removed/renamed (LookupError).  .hgtags content is remapped
    through revmap so tags point at converted nodes.'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
296
296
297 # Remap tag data using a revision map
297 # Remap tag data using a revision map
298 def _converttags(ui, revmap, data):
298 def _converttags(ui, revmap, data):
299 newdata = []
299 newdata = []
300 for line in data.splitlines():
300 for line in data.splitlines():
301 try:
301 try:
302 id, name = line.split(' ', 1)
302 id, name = line.split(' ', 1)
303 except ValueError:
303 except ValueError:
304 ui.warn(_('skipping incorrectly formatted tag %s\n')
304 ui.warn(_('skipping incorrectly formatted tag %s\n')
305 % line)
305 % line)
306 continue
306 continue
307 try:
307 try:
308 newid = node.bin(id)
308 newid = node.bin(id)
309 except TypeError:
309 except TypeError:
310 ui.warn(_('skipping incorrectly formatted id %s\n')
310 ui.warn(_('skipping incorrectly formatted id %s\n')
311 % id)
311 % id)
312 continue
312 continue
313 try:
313 try:
314 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
314 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
315 name))
315 name))
316 except KeyError:
316 except KeyError:
317 ui.warn(_('no mapping for id %s\n') % id)
317 ui.warn(_('no mapping for id %s\n') % id)
318 continue
318 continue
319 return ''.join(newdata)
319 return ''.join(newdata)
320
320
321 def _islfile(file, ctx, matcher, size):
321 def _islfile(file, ctx, matcher, size):
322 '''Return true if file should be considered a largefile, i.e.
322 '''Return true if file should be considered a largefile, i.e.
323 matcher matches it or it is larger than size.'''
323 matcher matches it or it is larger than size.'''
324 # never store special .hg* files as largefiles
324 # never store special .hg* files as largefiles
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
325 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
326 return False
326 return False
327 if matcher and matcher(file):
327 if matcher and matcher(file):
328 return True
328 return True
329 try:
329 try:
330 return ctx.filectx(file).size() >= size * 1024 * 1024
330 return ctx.filectx(file).size() >= size * 1024 * 1024
331 except error.LookupError:
331 except error.LookupError:
332 return False
332 return False
333
333
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory._openstore(rsrc, rdst, put=True)

    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    present = store.exists(files)
    # keep only the hashes the remote store does not already have
    files = [h for h in files if not present[h]]
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for pos, sha in enumerate(files):
        ui.progress(_('uploading largefiles'), pos, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, sha)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % sha)
        # XXX check for errors here
        store.put(source, sha)
    ui.progress(_('uploading largefiles'), None)
359
359
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = repo.revs('all()') if all else ['.']
    store = storefactory._openstore(repo)
    return store.verify(revs, contents=contents)
373
373
def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)

    toget = []
    for lfile in lfiles:
        try:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            continue # node must be None and standin wasn't found in wctx
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if not toget:
        return ([], [])
    store = storefactory._openstore(repo)
    return store.get(toget)
402
402
def downloadlfiles(ui, repo, rev=None):
    '''Download into the local cache all largefiles referenced by the
    given revisions; return (totalsuccess, totalmissing) counts.'''
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev': rev},
                                          prepare):
            cached, missed = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(cached)
            totalmissing += len(missed)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
420
420
def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        # largefile name -> expected hash, for files whose standin exists
        update = {}
        updated, removed = 0, 0
        wvfs = repo.wvfs
        for lfile in lfiles:
            rellfile = lfile
            rellfileorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                start=repo.root)
            relstandin = lfutil.standin(lfile)
            relstandinorig = os.path.relpath(
                scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                start=repo.root)
            if wvfs.exists(relstandin):
                # when a backup of the standin exists, save the current
                # largefile contents to the largefile's own origpath, then
                # drop the standin backup
                if (wvfs.exists(relstandinorig) and
                    wvfs.exists(rellfile)):
                    shutil.copyfile(wvfs.join(rellfile),
                                    wvfs.join(rellfileorig))
                    wvfs.unlinkpath(relstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        wvfs.unlinkpath(rellfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (wvfs.exists(rellfile) and
                    repo.dirstate.normalize(lfile) not in repo[None]):
                    wvfs.unlinkpath(rellfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            # update1 flags whether this particular largefile was touched
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            rellfile = lfile
            relstandin = lfutil.standin(lfile)
            if wvfs.exists(relstandin):
                mode = wvfs.stat(relstandin).st_mode
                if mode != wvfs.stat(rellfile).st_mode:
                    wvfs.chmod(rellfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                                                                     removed))
512
512
@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    revspec = opts.get('rev', [])
    if not revspec:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revspec)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        cached, missing = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
@@ -1,1419 +1,1419
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, registrar, revset, error
15 archival, pathutil, registrar, revset, error
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import storefactory
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    lfmatch = copy.copy(match)
    def islfile(f):
        # a file is a largefile iff its standin is tracked
        return lfutil.standin(f) in manifest
    lfmatch._files = filter(islfile, lfmatch._files)
    lfmatch._fileroots = set(lfmatch._files)
    lfmatch._always = False
    origmatchfn = lfmatch.matchfn
    lfmatch.matchfn = lambda f: islfile(f) and origmatchfn(f)
    return lfmatch
35
35
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that matches only the non-largefiles of the
    original matcher, additionally skipping anything listed in exclude'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    newmatch = copy.copy(match)
    def notlfile(f):
        # reject standins, files whose standin is tracked, and exclusions
        return not (lfutil.isstandin(f) or lfutil.standin(f) in manifest
                    or f in excluded)
    newmatch._files = filter(notlfile, newmatch._files)
    newmatch._fileroots = set(newmatch._files)
    newmatch._always = False
    origmatchfn = newmatch.matchfn
    newmatch.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return newmatch
50
50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        # 'oldmatch' is bound below after this closure is defined; by the
        # time overridematch actually runs, installmatchfn() has returned
        # the previous scmutil.match for us to delegate to (late binding).
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)
60
60
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    previous = scmutil.match
    # remember the replaced function on the new one so it can be restored
    setattr(f, 'oldmatch', previous)
    scmutil.match = f
    return previous
68
68
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Supply the current function as the getattr default: without it this
    # raised AttributeError when no matchfn was installed, contradicting
    # the "no-op" contract above and the behavior of the sibling
    # restorematchandpatsfn().
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
76
76
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with f, remembering the replaced
    function on f.oldmatchandpats so it can be restored later.'''
    previous = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', previous)
    scmutil.matchandpats = f
    return previous
82
82
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    # the third getattr argument makes this a true no-op when nothing
    # was installed (no 'oldmatchandpats' attribute present)
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)
92
92
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add matched files as largefiles when --large is given, they exceed
    the configured minimum size, or they match the configured largefile
    patterns.  Returns a pair (added, bad) of file name lists.'''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # NOTE(review): the standin is written with an empty hash
                # here; presumably it is filled in at commit time - confirm
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    return added, bad
164
164
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove the largefiles matched by matcher, honouring --after.
    Returns 1 when at least one matched file could not be removed,
    otherwise 0.'''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only status entries whose standin is actually tracked
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # warn about each file; report 1 if anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result
232
232
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    '''Map a standin path back to the largefile name it stands for;
    non-standin paths are returned unchanged.'''
    standin = lfutil.splitstandin(path)
    return standin if standin else path
237
237
238 # -- Wrappers: modify existing commands --------------------------------
238 # -- Wrappers: modify existing commands --------------------------------
239
239
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the contradictory --normal/--large combination, then delegate."""
    wantnormal = opts.get('normal')
    wantlarge = opts.get('large')
    if wantnormal and wantlarge:
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
244
244
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    """Add files, routing large ones through the largefiles machinery.

    Large files are added first (creating their standins); the remaining
    normal files are then handed to the wrapped add. Returns the combined
    list of files that could not be added.
    """
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    # Exclude the just-added largefiles from the normal-file pass.
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    # lbad is already an iterable of filenames; extend directly instead of
    # wrapping it in a no-op generator expression.
    bad.extend(lbad)
    return bad
257
257
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    """Remove normal files via the wrapped remove, then remove largefiles."""
    nmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(ui, repo, nmatcher, prefix, after, force, subrepos)
    lfresult = removelargefiles(ui, repo, False, matcher,
                                after=after, force=force)
    return lfresult or normalresult
263
263
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefiles status reporting enabled.

    The flag lives on the underlying hg repo of the subrepo wrapper and is
    always cleared again, even if the wrapped call raises.
    """
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
270
270
def overridestatus(orig, ui, repo, *pats, **opts):
    """Compute status with largefile awareness enabled.

    Sets repo.lfstatus for the duration of the wrapped call and always
    clears it afterwards.
    """
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
277
277
def overridedirty(orig, repo, ignoreupdate=False):
    """Check subrepo dirtiness with largefiles status reporting enabled.

    Mirrors overridestatusfn: toggles lfstatus on the wrapped subrepo's
    underlying repo around the call.
    """
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
284
284
def overridelog(orig, ui, repo, *pats, **opts):
    """Run log so that largefiles are reported under their real names.

    Temporarily installs overridematchandpats (via installmatchandpatsfn)
    and replaces cmdutil._makenofollowlogfilematcher, then restores both
    in a finally block.
    """
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Filesets are left alone; only plain/kinded patterns get a
            # standin counterpart.
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        # The matcher was mutated above, so it no longer matches everything.
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Accept a path if either the path itself or its largefile name
            # (standin stripped) matches the original matcher.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Always undo both monkeypatches, even if log itself raised.
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
393
393
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run verify; with --large/--lfa/--lfc also verify largefiles.

    The largefile-specific flags are popped before delegating so the
    wrapped verify never sees them.
    """
    wantlarge = opts.pop('large', False)
    checkall = opts.pop('lfa', False)
    checkcontents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if wantlarge or checkall or checkcontents:
        result = result or lfcommands.verifylfiles(ui, repo, checkall,
                                                   checkcontents)
    return result
403
403
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """With --large, show the largefiles dirstate instead of the normal one."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return
    # Present the largefiles dirstate through a minimal repo-like shim;
    # debugstate only needs a .dirstate attribute.
    class fakerepo(object):
        dirstate = lfutil.openlfdirstate(ui, repo)
    orig(ui, fakerepo, *pats, **opts)
412
412
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Skip the unknown-file collision check for tracked largefiles.

    Before starting the manifest merge, merge.updates calls
    _checkunknownfile to detect merged-in files colliding with unknown
    working-copy files. Largefiles look unknown to that check, which would
    block merging in a file 'foo' when a largefile of the same name exists.
    If the file's standin is present in wctx, report no collision here and
    let the overridden calculateupdates handle the case instead.
    """
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
427
427
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, *args, **kwargs):
    """Run calculateupdates, then reconcile largefile/standin actions.

    Returns the (actions, diverge, renamedelete) triple from origfn, with
    the actions dict possibly rewritten for files whose largefile-ness
    differs between the merge parents (see the comment block above).
    """
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)

    # A forced non-branch merge overwrites unconditionally; nothing to
    # reconcile.
    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                f1, f2, fa, move, anc = sargs
                # Rewrite the 'dc' args into the (flags, backup) shape a
                # plain 'g' action carries.
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
521
521
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Sync dirstates for 'lfmr' (largefile mark-removed) actions, then delegate."""
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # Drop the file from the normal dirstate before 'orig' runs so
            # the 'remove' happens ahead of all other recorded actions...
            repo.dirstate.remove(lfile)
            # ...and register it in the largefiles dirstate so it doesn't
            # get synclfdirstate'd as a normal file afterwards.
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
534
534
535
535
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    """Merge standin files by comparing their (hash) contents.

    Non-standins, and merges where either side is absent, fall through to
    the wrapped filemerge. Otherwise the other side's content is taken
    when only it changed relative to the ancestor; when both sides
    changed, the user is prompted. Returns (True, 0, False) like a
    successfully completed filemerge.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # Standin contents are compared after strip/lower normalization.
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    # Take the other side's content when it differs from both the ancestor
    # and the local side, and either only the other side changed
    # (dhash == ahash) or the user explicitly picked "other".
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
558
558
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Return path copies with standin names mapped back to largefile names."""
    def unstandin(path):
        return lfutil.splitstandin(path) or path

    copies = orig(ctx1, ctx2, match=match)
    return dict((unstandin(src), unstandin(dst))
                for src, dst in copies.iteritems())
567
567
568 # Copy first changes the matchers to match standins instead of
568 # Copy first changes the matchers to match standins instead of
569 # largefiles. Then it overrides util.copyfile in that function it
569 # largefiles. Then it overrides util.copyfile in that function it
570 # checks if the destination largefile already exists. It also keeps a
570 # checks if the destination largefile already exists. It also keeps a
571 # list of copied files so that the largefiles can be copied and the
571 # list of copied files so that the largefiles can be copied and the
572 # dirstate updated.
572 # dirstate updated.
573 def overridecopy(orig, ui, repo, pats, opts, rename=False):
573 def overridecopy(orig, ui, repo, pats, opts, rename=False):
574 # doesn't remove largefile on rename
574 # doesn't remove largefile on rename
575 if len(pats) < 2:
575 if len(pats) < 2:
576 # this isn't legal, let the original function deal with it
576 # this isn't legal, let the original function deal with it
577 return orig(ui, repo, pats, opts, rename)
577 return orig(ui, repo, pats, opts, rename)
578
578
579 # This could copy both lfiles and normal files in one command,
579 # This could copy both lfiles and normal files in one command,
580 # but we don't want to do that. First replace their matcher to
580 # but we don't want to do that. First replace their matcher to
581 # only match normal files and run it, then replace it to just
581 # only match normal files and run it, then replace it to just
582 # match largefiles and run it again.
582 # match largefiles and run it again.
583 nonormalfiles = False
583 nonormalfiles = False
584 nolfiles = False
584 nolfiles = False
585 installnormalfilesmatchfn(repo[None].manifest())
585 installnormalfilesmatchfn(repo[None].manifest())
586 try:
586 try:
587 result = orig(ui, repo, pats, opts, rename)
587 result = orig(ui, repo, pats, opts, rename)
588 except error.Abort as e:
588 except error.Abort as e:
589 if str(e) != _('no files to copy'):
589 if str(e) != _('no files to copy'):
590 raise e
590 raise e
591 else:
591 else:
592 nonormalfiles = True
592 nonormalfiles = True
593 result = 0
593 result = 0
594 finally:
594 finally:
595 restorematchfn()
595 restorematchfn()
596
596
597 # The first rename can cause our current working directory to be removed.
597 # The first rename can cause our current working directory to be removed.
598 # In that case there is nothing left to copy/rename so just quit.
598 # In that case there is nothing left to copy/rename so just quit.
599 try:
599 try:
600 repo.getcwd()
600 repo.getcwd()
601 except OSError:
601 except OSError:
602 return result
602 return result
603
603
604 def makestandin(relpath):
604 def makestandin(relpath):
605 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
605 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
606 return repo.wvfs.join(lfutil.standin(path))
606 return repo.wvfs.join(lfutil.standin(path))
607
607
608 fullpats = scmutil.expandpats(pats)
608 fullpats = scmutil.expandpats(pats)
609 dest = fullpats[-1]
609 dest = fullpats[-1]
610
610
611 if os.path.isdir(dest):
611 if os.path.isdir(dest):
612 if not os.path.isdir(makestandin(dest)):
612 if not os.path.isdir(makestandin(dest)):
613 os.makedirs(makestandin(dest))
613 os.makedirs(makestandin(dest))
614
614
615 try:
615 try:
616 # When we call orig below it creates the standins but we don't add
616 # When we call orig below it creates the standins but we don't add
617 # them to the dir state until later so lock during that time.
617 # them to the dir state until later so lock during that time.
618 wlock = repo.wlock()
618 wlock = repo.wlock()
619
619
620 manifest = repo[None].manifest()
620 manifest = repo[None].manifest()
621 def overridematch(ctx, pats=(), opts=None, globbed=False,
621 def overridematch(ctx, pats=(), opts=None, globbed=False,
622 default='relpath', badfn=None):
622 default='relpath', badfn=None):
623 if opts is None:
623 if opts is None:
624 opts = {}
624 opts = {}
625 newpats = []
625 newpats = []
626 # The patterns were previously mangled to add the standin
626 # The patterns were previously mangled to add the standin
627 # directory; we need to remove that now
627 # directory; we need to remove that now
628 for pat in pats:
628 for pat in pats:
629 if match_.patkind(pat) is None and lfutil.shortname in pat:
629 if match_.patkind(pat) is None and lfutil.shortname in pat:
630 newpats.append(pat.replace(lfutil.shortname, ''))
630 newpats.append(pat.replace(lfutil.shortname, ''))
631 else:
631 else:
632 newpats.append(pat)
632 newpats.append(pat)
633 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
633 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
634 m = copy.copy(match)
634 m = copy.copy(match)
635 lfile = lambda f: lfutil.standin(f) in manifest
635 lfile = lambda f: lfutil.standin(f) in manifest
636 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
636 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
637 m._fileroots = set(m._files)
637 m._fileroots = set(m._files)
638 origmatchfn = m.matchfn
638 origmatchfn = m.matchfn
639 m.matchfn = lambda f: (lfutil.isstandin(f) and
639 m.matchfn = lambda f: (lfutil.isstandin(f) and
640 (f in manifest) and
640 (f in manifest) and
641 origmatchfn(lfutil.splitstandin(f)) or
641 origmatchfn(lfutil.splitstandin(f)) or
642 None)
642 None)
643 return m
643 return m
644 oldmatch = installmatchfn(overridematch)
644 oldmatch = installmatchfn(overridematch)
645 listpats = []
645 listpats = []
646 for pat in pats:
646 for pat in pats:
647 if match_.patkind(pat) is not None:
647 if match_.patkind(pat) is not None:
648 listpats.append(pat)
648 listpats.append(pat)
649 else:
649 else:
650 listpats.append(makestandin(pat))
650 listpats.append(makestandin(pat))
651
651
652 try:
652 try:
653 origcopyfile = util.copyfile
653 origcopyfile = util.copyfile
654 copiedfiles = []
654 copiedfiles = []
655 def overridecopyfile(src, dest):
655 def overridecopyfile(src, dest):
656 if (lfutil.shortname in src and
656 if (lfutil.shortname in src and
657 dest.startswith(repo.wjoin(lfutil.shortname))):
657 dest.startswith(repo.wjoin(lfutil.shortname))):
658 destlfile = dest.replace(lfutil.shortname, '')
658 destlfile = dest.replace(lfutil.shortname, '')
659 if not opts['force'] and os.path.exists(destlfile):
659 if not opts['force'] and os.path.exists(destlfile):
660 raise IOError('',
660 raise IOError('',
661 _('destination largefile already exists'))
661 _('destination largefile already exists'))
662 copiedfiles.append((src, dest))
662 copiedfiles.append((src, dest))
663 origcopyfile(src, dest)
663 origcopyfile(src, dest)
664
664
665 util.copyfile = overridecopyfile
665 util.copyfile = overridecopyfile
666 result += orig(ui, repo, listpats, opts, rename)
666 result += orig(ui, repo, listpats, opts, rename)
667 finally:
667 finally:
668 util.copyfile = origcopyfile
668 util.copyfile = origcopyfile
669
669
670 lfdirstate = lfutil.openlfdirstate(ui, repo)
670 lfdirstate = lfutil.openlfdirstate(ui, repo)
671 for (src, dest) in copiedfiles:
671 for (src, dest) in copiedfiles:
672 if (lfutil.shortname in src and
672 if (lfutil.shortname in src and
673 dest.startswith(repo.wjoin(lfutil.shortname))):
673 dest.startswith(repo.wjoin(lfutil.shortname))):
674 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
674 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
675 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
675 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
676 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
676 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
677 if not os.path.isdir(destlfiledir):
677 if not os.path.isdir(destlfiledir):
678 os.makedirs(destlfiledir)
678 os.makedirs(destlfiledir)
679 if rename:
679 if rename:
680 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
680 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
681
681
682 # The file is gone, but this deletes any empty parent
682 # The file is gone, but this deletes any empty parent
683 # directories as a side-effect.
683 # directories as a side-effect.
684 util.unlinkpath(repo.wjoin(srclfile), True)
684 util.unlinkpath(repo.wjoin(srclfile), True)
685 lfdirstate.remove(srclfile)
685 lfdirstate.remove(srclfile)
686 else:
686 else:
687 util.copyfile(repo.wjoin(srclfile),
687 util.copyfile(repo.wjoin(srclfile),
688 repo.wjoin(destlfile))
688 repo.wjoin(destlfile))
689
689
690 lfdirstate.add(destlfile)
690 lfdirstate.add(destlfile)
691 lfdirstate.write()
691 lfdirstate.write()
692 except error.Abort as e:
692 except error.Abort as e:
693 if str(e) != _('no files to copy'):
693 if str(e) != _('no files to copy'):
694 raise e
694 raise e
695 else:
695 else:
696 nolfiles = True
696 nolfiles = True
697 finally:
697 finally:
698 restorematchfn()
698 restorematchfn()
699 wlock.release()
699 wlock.release()
700
700
701 if nolfiles and nonormalfiles:
701 if nolfiles and nonormalfiles:
702 raise error.Abort(_('no files to copy'))
702 raise error.Abort(_('no files to copy'))
703
703
704 return result
704 return result
705
705
706 # When the user calls revert, we have to be careful to not revert any
706 # When the user calls revert, we have to be careful to not revert any
707 # changes to other largefiles accidentally. This means we have to keep
707 # changes to other largefiles accidentally. This means we have to keep
708 # track of the largefiles that are being reverted so we only pull down
708 # track of the largefiles that are being reverted so we only pull down
709 # the necessary largefiles.
709 # the necessary largefiles.
710 #
710 #
711 # Standins are only updated (to match the hash of largefiles) before
711 # Standins are only updated (to match the hash of largefiles) before
712 # commits. Update the standins then run the original revert, changing
712 # commits. Update the standins then run the original revert, changing
713 # the matcher to hit standins instead of largefiles. Based on the
713 # the matcher to hit standins instead of largefiles. Based on the
714 # resulting standins update the largefiles.
714 # resulting standins update the largefiles.
715 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
715 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
716 # Because we put the standins in a bad state (by updating them)
716 # Because we put the standins in a bad state (by updating them)
717 # and then return them to a correct state we need to lock to
717 # and then return them to a correct state we need to lock to
718 # prevent others from changing them in their incorrect state.
718 # prevent others from changing them in their incorrect state.
719 with repo.wlock():
719 with repo.wlock():
720 lfdirstate = lfutil.openlfdirstate(ui, repo)
720 lfdirstate = lfutil.openlfdirstate(ui, repo)
721 s = lfutil.lfdirstatestatus(lfdirstate, repo)
721 s = lfutil.lfdirstatestatus(lfdirstate, repo)
722 lfdirstate.write()
722 lfdirstate.write()
723 for lfile in s.modified:
723 for lfile in s.modified:
724 lfutil.updatestandin(repo, lfutil.standin(lfile))
724 lfutil.updatestandin(repo, lfutil.standin(lfile))
725 for lfile in s.deleted:
725 for lfile in s.deleted:
726 if (repo.wvfs.exists(lfutil.standin(lfile))):
726 if (repo.wvfs.exists(lfutil.standin(lfile))):
727 repo.wvfs.unlink(lfutil.standin(lfile))
727 repo.wvfs.unlink(lfutil.standin(lfile))
728
728
729 oldstandins = lfutil.getstandinsstate(repo)
729 oldstandins = lfutil.getstandinsstate(repo)
730
730
731 def overridematch(mctx, pats=(), opts=None, globbed=False,
731 def overridematch(mctx, pats=(), opts=None, globbed=False,
732 default='relpath', badfn=None):
732 default='relpath', badfn=None):
733 if opts is None:
733 if opts is None:
734 opts = {}
734 opts = {}
735 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
735 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
736 m = copy.copy(match)
736 m = copy.copy(match)
737
737
738 # revert supports recursing into subrepos, and though largefiles
738 # revert supports recursing into subrepos, and though largefiles
739 # currently doesn't work correctly in that case, this match is
739 # currently doesn't work correctly in that case, this match is
740 # called, so the lfdirstate above may not be the correct one for
740 # called, so the lfdirstate above may not be the correct one for
741 # this invocation of match.
741 # this invocation of match.
742 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
742 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
743 False)
743 False)
744
744
745 def tostandin(f):
745 def tostandin(f):
746 standin = lfutil.standin(f)
746 standin = lfutil.standin(f)
747 if standin in ctx or standin in mctx:
747 if standin in ctx or standin in mctx:
748 return standin
748 return standin
749 elif standin in repo[None] or lfdirstate[f] == 'r':
749 elif standin in repo[None] or lfdirstate[f] == 'r':
750 return None
750 return None
751 return f
751 return f
752 m._files = [tostandin(f) for f in m._files]
752 m._files = [tostandin(f) for f in m._files]
753 m._files = [f for f in m._files if f is not None]
753 m._files = [f for f in m._files if f is not None]
754 m._fileroots = set(m._files)
754 m._fileroots = set(m._files)
755 origmatchfn = m.matchfn
755 origmatchfn = m.matchfn
756 def matchfn(f):
756 def matchfn(f):
757 if lfutil.isstandin(f):
757 if lfutil.isstandin(f):
758 return (origmatchfn(lfutil.splitstandin(f)) and
758 return (origmatchfn(lfutil.splitstandin(f)) and
759 (f in ctx or f in mctx))
759 (f in ctx or f in mctx))
760 return origmatchfn(f)
760 return origmatchfn(f)
761 m.matchfn = matchfn
761 m.matchfn = matchfn
762 return m
762 return m
763 oldmatch = installmatchfn(overridematch)
763 oldmatch = installmatchfn(overridematch)
764 try:
764 try:
765 orig(ui, repo, ctx, parents, *pats, **opts)
765 orig(ui, repo, ctx, parents, *pats, **opts)
766 finally:
766 finally:
767 restorematchfn()
767 restorematchfn()
768
768
769 newstandins = lfutil.getstandinsstate(repo)
769 newstandins = lfutil.getstandinsstate(repo)
770 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
770 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
771 # lfdirstate should be 'normallookup'-ed for updated files,
771 # lfdirstate should be 'normallookup'-ed for updated files,
772 # because reverting doesn't touch dirstate for 'normal' files
772 # because reverting doesn't touch dirstate for 'normal' files
773 # when target revision is explicitly specified: in such case,
773 # when target revision is explicitly specified: in such case,
774 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
774 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
775 # of target (standin) file.
775 # of target (standin) file.
776 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
776 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
777 normallookup=True)
777 normallookup=True)
778
778
779 # after pulling changesets, we need to take some extra care to get
779 # after pulling changesets, we need to take some extra care to get
780 # largefiles updated remotely
780 # largefiles updated remotely
781 def overridepull(orig, ui, repo, source=None, **opts):
781 def overridepull(orig, ui, repo, source=None, **opts):
782 revsprepull = len(repo)
782 revsprepull = len(repo)
783 if not source:
783 if not source:
784 source = 'default'
784 source = 'default'
785 repo.lfpullsource = source
785 repo.lfpullsource = source
786 result = orig(ui, repo, source, **opts)
786 result = orig(ui, repo, source, **opts)
787 revspostpull = len(repo)
787 revspostpull = len(repo)
788 lfrevs = opts.get('lfrev', [])
788 lfrevs = opts.get('lfrev', [])
789 if opts.get('all_largefiles'):
789 if opts.get('all_largefiles'):
790 lfrevs.append('pulled()')
790 lfrevs.append('pulled()')
791 if lfrevs and revspostpull > revsprepull:
791 if lfrevs and revspostpull > revsprepull:
792 numcached = 0
792 numcached = 0
793 repo.firstpulled = revsprepull # for pulled() revset expression
793 repo.firstpulled = revsprepull # for pulled() revset expression
794 try:
794 try:
795 for rev in scmutil.revrange(repo, lfrevs):
795 for rev in scmutil.revrange(repo, lfrevs):
796 ui.note(_('pulling largefiles for revision %s\n') % rev)
796 ui.note(_('pulling largefiles for revision %s\n') % rev)
797 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
797 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
798 numcached += len(cached)
798 numcached += len(cached)
799 finally:
799 finally:
800 del repo.firstpulled
800 del repo.firstpulled
801 ui.status(_("%d largefiles cached\n") % numcached)
801 ui.status(_("%d largefiles cached\n") % numcached)
802 return result
802 return result
803
803
804 def overridepush(orig, ui, repo, *args, **kwargs):
804 def overridepush(orig, ui, repo, *args, **kwargs):
805 """Override push command and store --lfrev parameters in opargs"""
805 """Override push command and store --lfrev parameters in opargs"""
806 lfrevs = kwargs.pop('lfrev', None)
806 lfrevs = kwargs.pop('lfrev', None)
807 if lfrevs:
807 if lfrevs:
808 opargs = kwargs.setdefault('opargs', {})
808 opargs = kwargs.setdefault('opargs', {})
809 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
809 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
810 return orig(ui, repo, *args, **kwargs)
810 return orig(ui, repo, *args, **kwargs)
811
811
812 def exchangepushoperation(orig, *args, **kwargs):
812 def exchangepushoperation(orig, *args, **kwargs):
813 """Override pushoperation constructor and store lfrevs parameter"""
813 """Override pushoperation constructor and store lfrevs parameter"""
814 lfrevs = kwargs.pop('lfrevs', None)
814 lfrevs = kwargs.pop('lfrevs', None)
815 pushop = orig(*args, **kwargs)
815 pushop = orig(*args, **kwargs)
816 pushop.lfrevs = lfrevs
816 pushop.lfrevs = lfrevs
817 return pushop
817 return pushop
818
818
819 revsetpredicate = registrar.revsetpredicate()
819 revsetpredicate = registrar.revsetpredicate()
820
820
821 @revsetpredicate('pulled()')
821 @revsetpredicate('pulled()')
822 def pulledrevsetsymbol(repo, subset, x):
822 def pulledrevsetsymbol(repo, subset, x):
823 """Changesets that just has been pulled.
823 """Changesets that just has been pulled.
824
824
825 Only available with largefiles from pull --lfrev expressions.
825 Only available with largefiles from pull --lfrev expressions.
826
826
827 .. container:: verbose
827 .. container:: verbose
828
828
829 Some examples:
829 Some examples:
830
830
831 - pull largefiles for all new changesets::
831 - pull largefiles for all new changesets::
832
832
833 hg pull -lfrev "pulled()"
833 hg pull -lfrev "pulled()"
834
834
835 - pull largefiles for all new branch heads::
835 - pull largefiles for all new branch heads::
836
836
837 hg pull -lfrev "head(pulled()) and not closed()"
837 hg pull -lfrev "head(pulled()) and not closed()"
838
838
839 """
839 """
840
840
841 try:
841 try:
842 firstpulled = repo.firstpulled
842 firstpulled = repo.firstpulled
843 except AttributeError:
843 except AttributeError:
844 raise error.Abort(_("pulled() only available in --lfrev"))
844 raise error.Abort(_("pulled() only available in --lfrev"))
845 return revset.baseset([r for r in subset if r >= firstpulled])
845 return revset.baseset([r for r in subset if r >= firstpulled])
846
846
847 def overrideclone(orig, ui, source, dest=None, **opts):
847 def overrideclone(orig, ui, source, dest=None, **opts):
848 d = dest
848 d = dest
849 if d is None:
849 if d is None:
850 d = hg.defaultdest(source)
850 d = hg.defaultdest(source)
851 if opts.get('all_largefiles') and not hg.islocal(d):
851 if opts.get('all_largefiles') and not hg.islocal(d):
852 raise error.Abort(_(
852 raise error.Abort(_(
853 '--all-largefiles is incompatible with non-local destination %s') %
853 '--all-largefiles is incompatible with non-local destination %s') %
854 d)
854 d)
855
855
856 return orig(ui, source, dest, **opts)
856 return orig(ui, source, dest, **opts)
857
857
858 def hgclone(orig, ui, opts, *args, **kwargs):
858 def hgclone(orig, ui, opts, *args, **kwargs):
859 result = orig(ui, opts, *args, **kwargs)
859 result = orig(ui, opts, *args, **kwargs)
860
860
861 if result is not None:
861 if result is not None:
862 sourcerepo, destrepo = result
862 sourcerepo, destrepo = result
863 repo = destrepo.local()
863 repo = destrepo.local()
864
864
865 # When cloning to a remote repo (like through SSH), no repo is available
865 # When cloning to a remote repo (like through SSH), no repo is available
866 # from the peer. Therefore the largefiles can't be downloaded and the
866 # from the peer. Therefore the largefiles can't be downloaded and the
867 # hgrc can't be updated.
867 # hgrc can't be updated.
868 if not repo:
868 if not repo:
869 return result
869 return result
870
870
871 # If largefiles is required for this repo, permanently enable it locally
871 # If largefiles is required for this repo, permanently enable it locally
872 if 'largefiles' in repo.requirements:
872 if 'largefiles' in repo.requirements:
873 fp = repo.vfs('hgrc', 'a', text=True)
873 fp = repo.vfs('hgrc', 'a', text=True)
874 try:
874 try:
875 fp.write('\n[extensions]\nlargefiles=\n')
875 fp.write('\n[extensions]\nlargefiles=\n')
876 finally:
876 finally:
877 fp.close()
877 fp.close()
878
878
879 # Caching is implicitly limited to 'rev' option, since the dest repo was
879 # Caching is implicitly limited to 'rev' option, since the dest repo was
880 # truncated at that point. The user may expect a download count with
880 # truncated at that point. The user may expect a download count with
881 # this option, so attempt whether or not this is a largefile repo.
881 # this option, so attempt whether or not this is a largefile repo.
882 if opts.get('all_largefiles'):
882 if opts.get('all_largefiles'):
883 success, missing = lfcommands.downloadlfiles(ui, repo, None)
883 success, missing = lfcommands.downloadlfiles(ui, repo, None)
884
884
885 if missing != 0:
885 if missing != 0:
886 return None
886 return None
887
887
888 return result
888 return result
889
889
890 def overriderebase(orig, ui, repo, **opts):
890 def overriderebase(orig, ui, repo, **opts):
891 if not util.safehasattr(repo, '_largefilesenabled'):
891 if not util.safehasattr(repo, '_largefilesenabled'):
892 return orig(ui, repo, **opts)
892 return orig(ui, repo, **opts)
893
893
894 resuming = opts.get('continue')
894 resuming = opts.get('continue')
895 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
895 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
896 repo._lfstatuswriters.append(lambda *msg, **opts: None)
896 repo._lfstatuswriters.append(lambda *msg, **opts: None)
897 try:
897 try:
898 return orig(ui, repo, **opts)
898 return orig(ui, repo, **opts)
899 finally:
899 finally:
900 repo._lfstatuswriters.pop()
900 repo._lfstatuswriters.pop()
901 repo._lfcommithooks.pop()
901 repo._lfcommithooks.pop()
902
902
903 def overridearchivecmd(orig, ui, repo, dest, **opts):
903 def overridearchivecmd(orig, ui, repo, dest, **opts):
904 repo.unfiltered().lfstatus = True
904 repo.unfiltered().lfstatus = True
905
905
906 try:
906 try:
907 return orig(ui, repo.unfiltered(), dest, **opts)
907 return orig(ui, repo.unfiltered(), dest, **opts)
908 finally:
908 finally:
909 repo.unfiltered().lfstatus = False
909 repo.unfiltered().lfstatus = False
910
910
911 def hgwebarchive(orig, web, req, tmpl):
911 def hgwebarchive(orig, web, req, tmpl):
912 web.repo.lfstatus = True
912 web.repo.lfstatus = True
913
913
914 try:
914 try:
915 return orig(web, req, tmpl)
915 return orig(web, req, tmpl)
916 finally:
916 finally:
917 web.repo.lfstatus = False
917 web.repo.lfstatus = False
918
918
919 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
919 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
920 prefix='', mtime=None, subrepos=None):
920 prefix='', mtime=None, subrepos=None):
921 # For some reason setting repo.lfstatus in hgwebarchive only changes the
921 # For some reason setting repo.lfstatus in hgwebarchive only changes the
922 # unfiltered repo's attr, so check that as well.
922 # unfiltered repo's attr, so check that as well.
923 if not repo.lfstatus and not repo.unfiltered().lfstatus:
923 if not repo.lfstatus and not repo.unfiltered().lfstatus:
924 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
924 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
925 subrepos)
925 subrepos)
926
926
927 # No need to lock because we are only reading history and
927 # No need to lock because we are only reading history and
928 # largefile caches, neither of which are modified.
928 # largefile caches, neither of which are modified.
929 if node is not None:
929 if node is not None:
930 lfcommands.cachelfiles(repo.ui, repo, node)
930 lfcommands.cachelfiles(repo.ui, repo, node)
931
931
932 if kind not in archival.archivers:
932 if kind not in archival.archivers:
933 raise error.Abort(_("unknown archive type '%s'") % kind)
933 raise error.Abort(_("unknown archive type '%s'") % kind)
934
934
935 ctx = repo[node]
935 ctx = repo[node]
936
936
937 if kind == 'files':
937 if kind == 'files':
938 if prefix:
938 if prefix:
939 raise error.Abort(
939 raise error.Abort(
940 _('cannot give prefix when archiving to files'))
940 _('cannot give prefix when archiving to files'))
941 else:
941 else:
942 prefix = archival.tidyprefix(dest, kind, prefix)
942 prefix = archival.tidyprefix(dest, kind, prefix)
943
943
944 def write(name, mode, islink, getdata):
944 def write(name, mode, islink, getdata):
945 if matchfn and not matchfn(name):
945 if matchfn and not matchfn(name):
946 return
946 return
947 data = getdata()
947 data = getdata()
948 if decode:
948 if decode:
949 data = repo.wwritedata(name, data)
949 data = repo.wwritedata(name, data)
950 archiver.addfile(prefix + name, mode, islink, data)
950 archiver.addfile(prefix + name, mode, islink, data)
951
951
952 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
952 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
953
953
954 if repo.ui.configbool("ui", "archivemeta", True):
954 if repo.ui.configbool("ui", "archivemeta", True):
955 write('.hg_archival.txt', 0o644, False,
955 write('.hg_archival.txt', 0o644, False,
956 lambda: archival.buildmetadata(ctx))
956 lambda: archival.buildmetadata(ctx))
957
957
958 for f in ctx:
958 for f in ctx:
959 ff = ctx.flags(f)
959 ff = ctx.flags(f)
960 getdata = ctx[f].data
960 getdata = ctx[f].data
961 if lfutil.isstandin(f):
961 if lfutil.isstandin(f):
962 if node is not None:
962 if node is not None:
963 path = lfutil.findfile(repo, getdata().strip())
963 path = lfutil.findfile(repo, getdata().strip())
964
964
965 if path is None:
965 if path is None:
966 raise error.Abort(
966 raise error.Abort(
967 _('largefile %s not found in repo store or system cache')
967 _('largefile %s not found in repo store or system cache')
968 % lfutil.splitstandin(f))
968 % lfutil.splitstandin(f))
969 else:
969 else:
970 path = lfutil.splitstandin(f)
970 path = lfutil.splitstandin(f)
971
971
972 f = lfutil.splitstandin(f)
972 f = lfutil.splitstandin(f)
973
973
974 getdata = lambda: util.readfile(path)
974 getdata = lambda: util.readfile(path)
975 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
975 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
976
976
977 if subrepos:
977 if subrepos:
978 for subpath in sorted(ctx.substate):
978 for subpath in sorted(ctx.substate):
979 sub = ctx.workingsub(subpath)
979 sub = ctx.workingsub(subpath)
980 submatch = match_.subdirmatcher(subpath, matchfn)
980 submatch = match_.subdirmatcher(subpath, matchfn)
981 sub._repo.lfstatus = True
981 sub._repo.lfstatus = True
982 sub.archive(archiver, prefix, submatch)
982 sub.archive(archiver, prefix, submatch)
983
983
984 archiver.done()
984 archiver.done()
985
985
986 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
986 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
987 if not repo._repo.lfstatus:
987 if not repo._repo.lfstatus:
988 return orig(repo, archiver, prefix, match)
988 return orig(repo, archiver, prefix, match)
989
989
990 repo._get(repo._state + ('hg',))
990 repo._get(repo._state + ('hg',))
991 rev = repo._state[1]
991 rev = repo._state[1]
992 ctx = repo._repo[rev]
992 ctx = repo._repo[rev]
993
993
994 if ctx.node() is not None:
994 if ctx.node() is not None:
995 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
995 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
996
996
997 def write(name, mode, islink, getdata):
997 def write(name, mode, islink, getdata):
998 # At this point, the standin has been replaced with the largefile name,
998 # At this point, the standin has been replaced with the largefile name,
999 # so the normal matcher works here without the lfutil variants.
999 # so the normal matcher works here without the lfutil variants.
1000 if match and not match(f):
1000 if match and not match(f):
1001 return
1001 return
1002 data = getdata()
1002 data = getdata()
1003
1003
1004 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1004 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1005
1005
1006 for f in ctx:
1006 for f in ctx:
1007 ff = ctx.flags(f)
1007 ff = ctx.flags(f)
1008 getdata = ctx[f].data
1008 getdata = ctx[f].data
1009 if lfutil.isstandin(f):
1009 if lfutil.isstandin(f):
1010 if ctx.node() is not None:
1010 if ctx.node() is not None:
1011 path = lfutil.findfile(repo._repo, getdata().strip())
1011 path = lfutil.findfile(repo._repo, getdata().strip())
1012
1012
1013 if path is None:
1013 if path is None:
1014 raise error.Abort(
1014 raise error.Abort(
1015 _('largefile %s not found in repo store or system cache')
1015 _('largefile %s not found in repo store or system cache')
1016 % lfutil.splitstandin(f))
1016 % lfutil.splitstandin(f))
1017 else:
1017 else:
1018 path = lfutil.splitstandin(f)
1018 path = lfutil.splitstandin(f)
1019
1019
1020 f = lfutil.splitstandin(f)
1020 f = lfutil.splitstandin(f)
1021
1021
1022 getdata = lambda: util.readfile(os.path.join(prefix, path))
1022 getdata = lambda: util.readfile(os.path.join(prefix, path))
1023
1023
1024 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1024 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1025
1025
1026 for subpath in sorted(ctx.substate):
1026 for subpath in sorted(ctx.substate):
1027 sub = ctx.workingsub(subpath)
1027 sub = ctx.workingsub(subpath)
1028 submatch = match_.subdirmatcher(subpath, match)
1028 submatch = match_.subdirmatcher(subpath, match)
1029 sub._repo.lfstatus = True
1029 sub._repo.lfstatus = True
1030 sub.archive(archiver, prefix + repo._path + '/', submatch)
1030 sub.archive(archiver, prefix + repo._path + '/', submatch)
1031
1031
1032 # If a largefile is modified, the change is not reflected in its
1032 # If a largefile is modified, the change is not reflected in its
1033 # standin until a commit. cmdutil.bailifchanged() raises an exception
1033 # standin until a commit. cmdutil.bailifchanged() raises an exception
1034 # if the repo has uncommitted changes. Wrap it to also check if
1034 # if the repo has uncommitted changes. Wrap it to also check if
1035 # largefiles were changed. This is used by bisect, backout and fetch.
1035 # largefiles were changed. This is used by bisect, backout and fetch.
1036 def overridebailifchanged(orig, repo, *args, **kwargs):
1036 def overridebailifchanged(orig, repo, *args, **kwargs):
1037 orig(repo, *args, **kwargs)
1037 orig(repo, *args, **kwargs)
1038 repo.lfstatus = True
1038 repo.lfstatus = True
1039 s = repo.status()
1039 s = repo.status()
1040 repo.lfstatus = False
1040 repo.lfstatus = False
1041 if s.modified or s.added or s.removed or s.deleted:
1041 if s.modified or s.added or s.removed or s.deleted:
1042 raise error.Abort(_('uncommitted changes'))
1042 raise error.Abort(_('uncommitted changes'))
1043
1043
1044 def postcommitstatus(orig, repo, *args, **kwargs):
1044 def postcommitstatus(orig, repo, *args, **kwargs):
1045 repo.lfstatus = True
1045 repo.lfstatus = True
1046 try:
1046 try:
1047 return orig(repo, *args, **kwargs)
1047 return orig(repo, *args, **kwargs)
1048 finally:
1048 finally:
1049 repo.lfstatus = False
1049 repo.lfstatus = False
1050
1050
1051 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1051 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1052 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1052 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1053 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1053 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1054 m = composelargefilematcher(match, repo[None].manifest())
1054 m = composelargefilematcher(match, repo[None].manifest())
1055
1055
1056 try:
1056 try:
1057 repo.lfstatus = True
1057 repo.lfstatus = True
1058 s = repo.status(match=m, clean=True)
1058 s = repo.status(match=m, clean=True)
1059 finally:
1059 finally:
1060 repo.lfstatus = False
1060 repo.lfstatus = False
1061 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1061 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1062 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1062 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1063
1063
1064 for f in forget:
1064 for f in forget:
1065 if lfutil.standin(f) not in repo.dirstate and not \
1065 if lfutil.standin(f) not in repo.dirstate and not \
1066 repo.wvfs.isdir(lfutil.standin(f)):
1066 repo.wvfs.isdir(lfutil.standin(f)):
1067 ui.warn(_('not removing %s: file is already untracked\n')
1067 ui.warn(_('not removing %s: file is already untracked\n')
1068 % m.rel(f))
1068 % m.rel(f))
1069 bad.append(f)
1069 bad.append(f)
1070
1070
1071 for f in forget:
1071 for f in forget:
1072 if ui.verbose or not m.exact(f):
1072 if ui.verbose or not m.exact(f):
1073 ui.status(_('removing %s\n') % m.rel(f))
1073 ui.status(_('removing %s\n') % m.rel(f))
1074
1074
1075 # Need to lock because standin files are deleted then removed from the
1075 # Need to lock because standin files are deleted then removed from the
1076 # repository and we could race in-between.
1076 # repository and we could race in-between.
1077 with repo.wlock():
1077 with repo.wlock():
1078 lfdirstate = lfutil.openlfdirstate(ui, repo)
1078 lfdirstate = lfutil.openlfdirstate(ui, repo)
1079 for f in forget:
1079 for f in forget:
1080 if lfdirstate[f] == 'a':
1080 if lfdirstate[f] == 'a':
1081 lfdirstate.drop(f)
1081 lfdirstate.drop(f)
1082 else:
1082 else:
1083 lfdirstate.remove(f)
1083 lfdirstate.remove(f)
1084 lfdirstate.write()
1084 lfdirstate.write()
1085 standins = [lfutil.standin(f) for f in forget]
1085 standins = [lfutil.standin(f) for f in forget]
1086 for f in standins:
1086 for f in standins:
1087 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1087 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1088 rejected = repo[None].forget(standins)
1088 rejected = repo[None].forget(standins)
1089
1089
1090 bad.extend(f for f in rejected if f in m.files())
1090 bad.extend(f for f in rejected if f in m.files())
1091 forgot.extend(f for f in forget if f not in rejected)
1091 forgot.extend(f for f in forget if f not in rejected)
1092 return bad, forgot
1092 return bad, forgot
1093
1093
1094 def _getoutgoings(repo, other, missing, addfunc):
1094 def _getoutgoings(repo, other, missing, addfunc):
1095 """get pairs of filename and largefile hash in outgoing revisions
1095 """get pairs of filename and largefile hash in outgoing revisions
1096 in 'missing'.
1096 in 'missing'.
1097
1097
1098 largefiles already existing on 'other' repository are ignored.
1098 largefiles already existing on 'other' repository are ignored.
1099
1099
1100 'addfunc' is invoked with each unique pairs of filename and
1100 'addfunc' is invoked with each unique pairs of filename and
1101 largefile hash value.
1101 largefile hash value.
1102 """
1102 """
1103 knowns = set()
1103 knowns = set()
1104 lfhashes = set()
1104 lfhashes = set()
1105 def dedup(fn, lfhash):
1105 def dedup(fn, lfhash):
1106 k = (fn, lfhash)
1106 k = (fn, lfhash)
1107 if k not in knowns:
1107 if k not in knowns:
1108 knowns.add(k)
1108 knowns.add(k)
1109 lfhashes.add(lfhash)
1109 lfhashes.add(lfhash)
1110 lfutil.getlfilestoupload(repo, missing, dedup)
1110 lfutil.getlfilestoupload(repo, missing, dedup)
1111 if lfhashes:
1111 if lfhashes:
1112 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1112 lfexists = storefactory._openstore(repo, other).exists(lfhashes)
1113 for fn, lfhash in knowns:
1113 for fn, lfhash in knowns:
1114 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1114 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1115 addfunc(fn, lfhash)
1115 addfunc(fn, lfhash)
1116
1116
1117 def outgoinghook(ui, repo, other, opts, missing):
1117 def outgoinghook(ui, repo, other, opts, missing):
1118 if opts.pop('large', None):
1118 if opts.pop('large', None):
1119 lfhashes = set()
1119 lfhashes = set()
1120 if ui.debugflag:
1120 if ui.debugflag:
1121 toupload = {}
1121 toupload = {}
1122 def addfunc(fn, lfhash):
1122 def addfunc(fn, lfhash):
1123 if fn not in toupload:
1123 if fn not in toupload:
1124 toupload[fn] = []
1124 toupload[fn] = []
1125 toupload[fn].append(lfhash)
1125 toupload[fn].append(lfhash)
1126 lfhashes.add(lfhash)
1126 lfhashes.add(lfhash)
1127 def showhashes(fn):
1127 def showhashes(fn):
1128 for lfhash in sorted(toupload[fn]):
1128 for lfhash in sorted(toupload[fn]):
1129 ui.debug(' %s\n' % (lfhash))
1129 ui.debug(' %s\n' % (lfhash))
1130 else:
1130 else:
1131 toupload = set()
1131 toupload = set()
1132 def addfunc(fn, lfhash):
1132 def addfunc(fn, lfhash):
1133 toupload.add(fn)
1133 toupload.add(fn)
1134 lfhashes.add(lfhash)
1134 lfhashes.add(lfhash)
1135 def showhashes(fn):
1135 def showhashes(fn):
1136 pass
1136 pass
1137 _getoutgoings(repo, other, missing, addfunc)
1137 _getoutgoings(repo, other, missing, addfunc)
1138
1138
1139 if not toupload:
1139 if not toupload:
1140 ui.status(_('largefiles: no files to upload\n'))
1140 ui.status(_('largefiles: no files to upload\n'))
1141 else:
1141 else:
1142 ui.status(_('largefiles to upload (%d entities):\n')
1142 ui.status(_('largefiles to upload (%d entities):\n')
1143 % (len(lfhashes)))
1143 % (len(lfhashes)))
1144 for file in sorted(toupload):
1144 for file in sorted(toupload):
1145 ui.status(lfutil.splitstandin(file) + '\n')
1145 ui.status(lfutil.splitstandin(file) + '\n')
1146 showhashes(file)
1146 showhashes(file)
1147 ui.status('\n')
1147 ui.status('\n')
1148
1148
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --remote': report pending largefile uploads.

    When called with changes=None it only declares whether the outgoing
    check is needed (returns an (incoming, outgoing) bool pair); when
    called with the computed changes it prints the largefile summary line.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        # first pass: tell the caller which remote checks to perform
        if largeopt:
            return (False, True)  # only outgoing check is needed
        return (False, False)
    if not largeopt:
        return
    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no remote repo)\n'))
        return

    toupload = set()
    lfhashes = set()
    def addfunc(fn, lfhash):
        toupload.add(fn)
        lfhashes.add(lfhash)
    _getoutgoings(repo, peer, outgoing.missing, addfunc)

    if not toupload:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: %d entities for %d files to upload\n')
                  % (len(lfhashes), len(toupload)))
1177
1177
def overridesummary(orig, ui, repo, *pats, **opts):
    """Wrap 'hg summary' so status reflects largefiles while it runs."""
    # lfstatus makes repo.status() report on largefiles; always restore it
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1184
1184
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    """Wrap scmutil.addremove to handle largefiles before normal files."""
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how. Simply limit the
        # original matches to those in the deleted status list.
        origmatchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and origmatchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything
    # with largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1219
1219
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap 'hg purge' so tracked largefiles are not reported as unknown."""
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # filter out largefiles the lfdirstate knows about, so purge
        # only sees genuinely unknown/ignored files
        unknownfiles = [f for f in r.unknown if lfdirstate[f] == '?']
        ignoredfiles = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknownfiles, ignoredfiles, r.clean)

    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Wrap 'hg rollback' to restore standins and the largefile dirstate."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins currently tracked; any that disappear after rollback
        # are orphans and must be removed from the working directory
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result  # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if not lfutil.isstandin(f):
                continue
            orphans.discard(f)
            if repo.dirstate[f] == 'r':
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            elif f in pctx:
                fctx = pctx[f]
                repo.wwrite(f, fctx.data(), fctx.flags())
            else:
                # content of standin is not so important in 'a',
                # 'm' or 'n' (coming from the 2nd parent) cases
                lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resynchronize the largefile dirstate with the rolled-back state
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lforphans = set(lfdirstate)
        for lfile in lfutil.listlfiles(repo):
            lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
            lforphans.discard(lfile)
        for lfile in lforphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1282
1282
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Wrap 'hg transplant' to commit largefiles automatically and quietly."""
    resuming = opts.get('continue')
    # push automated hooks/writers for the duration of the transplant,
    # and always pop them again even if the wrapped command raises
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result
1293
1293
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Wrap 'hg cat' so largefiles can be catted by their visible name.

    Standins are resolved transparently; when the largefile content is not
    in the user cache it is fetched from the configured store first.
    Returns 0 if at least one file was written, 1 otherwise.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # accept a largefile name by matching its standin path too
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn

    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" for names we resolved via standins
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = storefactory._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise error.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            # with-block ensures the cache file is closed even if a
            # write to the output stream raises (the bare open()/close()
            # pair here previously leaked the handle on error)
            with open(path, "rb") as fpin:
                for chunk in util.filechunkiter(fpin, 128 * 1024):
                    fp.write(chunk)
        fp.close()
        err = 0
    return err
1355
1355
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    """Wrap merge.update to keep standins and largefiles synchronized."""
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        # refresh standins of possibly-modified largefiles so the update
        # machinery sees their current content hashes
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        # update largefiles whose standins changed during the merge/update
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
1410
1410
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """Wrap scmutil.marktouched to refresh largefiles behind touched standins."""
    result = orig(repo, files, *args, **kwargs)

    # any touched standin implies its largefile must be brought up to date
    filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
    if filelist:
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=False, normallookup=True)

    return result
@@ -1,226 +1,78
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
5 #
6 # This software may be used and distributed according to the terms of the
1 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
2 # GNU General Public License version 2 or any later version.
8
3
9 '''base class for store implementations and store-related utility code'''
4 from __future__ import absolute_import
10
5
11 import re
6 import re
12
7
13 from mercurial import util, node, hg, error
14 from mercurial.i18n import _
8 from mercurial.i18n import _
15
9
16 import lfutil
10 from mercurial import (
17
11 error,
18 class StoreError(Exception):
12 hg,
19 '''Raised when there is a problem getting files from or putting
13 util,
20 files to a central store.'''
14 )
21 def __init__(self, filename, hash, url, detail):
22 self.filename = filename
23 self.hash = hash
24 self.url = url
25 self.detail = detail
26
27 def longmessage(self):
28 return (_("error getting id %s from url %s for file %s: %s\n") %
29 (self.hash, util.hidepassword(self.url), self.filename,
30 self.detail))
31
32 def __str__(self):
33 return "%s: %s" % (util.hidepassword(self.url), self.detail)
34
class basestore(object):
    '''Abstract base class for largefile stores.

    Subclasses must implement put(), exists(), _getfile() and
    _verifyfiles(); this class provides the shared download and
    verification driver logic.
    '''

    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store so it can be retrieved by hash.'''
        raise NotImplementedError('abstract method')

    def exists(self, hashes):
        '''Check to see if the store contains the given hashes. Given an
        iterable of hashes it returns a mapping from hash to bool.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfully
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        # single bulk existence check up-front to avoid one round trip
        # per file
        available = self.exists(set(hash for (_filename, hash) in files))
        for at, (filename, hash) in enumerate(files):
            ui.progress(_('getting largefiles'), at, unit=_('files'),
                        total=len(files))
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(_('%s: largefile %s not available from %s\n')
                        % (filename, hash, util.hidepassword(self.url)))
                missing.append(filename)
                continue

            if self._gethash(filename, hash):
                success.append((filename, hash))
            else:
                missing.append(filename)

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        # download into a temp file first so a failed/corrupt transfer
        # never leaves a bad file under the final store name
        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(tmpname,
                                      createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""
        tmpfile.close()

        if gothash != hash:
            if gothash != "":
                self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''
        self.ui.status(_('searching %d changesets for largefiles\n') %
                       len(revs))
        verified = set()    # set of (filename, filenode) tuples
        filestocheck = []   # list of (cset, filename, expectedhash)
        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            for standin in cctx:
                filename = lfutil.splitstandin(standin)
                if not filename:
                    continue
                fctx = cctx[standin]
                key = (filename, fctx.filenode())
                if key not in verified:
                    verified.add(key)
                    # the standin's content is the expected sha1 hash
                    expectedhash = fctx.data()[0:40]
                    filestocheck.append((cset, filename, expectedhash))

        failed = self._verifyfiles(contents, filestocheck)

        numrevs = len(verified)
        numlfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            self.ui.status(
                _('verified contents of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        else:
            self.ui.status(
                _('verified existence of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile. Compute the hash of the file on-the-fly as it
        downloads and return the hash. Close tmpfile. Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfiles(self, contents, filestocheck):
        '''Perform the actual verification of files in the store.
        'contents' controls verification of content hash.
        'filestocheck' is list of files to check.
        Returns _true_ if any problems are found!
        '''
        raise NotImplementedError('abstract method')
167
168 import localstore, wirestore
169
170 _storeprovider = {
171 'file': [localstore.localstore],
172 'http': [wirestore.wirestore],
173 'https': [wirestore.wirestore],
174 'ssh': [wirestore.wirestore],
175 }
176
177 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
178
21
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
    """Return a store object for 'repo', resolving the remote if needed.

    When 'remote' is not given, the pull source (or push target when
    'put' is set) is resolved from the repo's configured paths; an
    unexpandable default falls back to the repo itself. Raises
    error.Abort for unsupported URL schemes or non-largefile stores.
    """
    ui = repo.ui

    if not remote:
        lfpullsource = getattr(repo, 'lfpullsource', None)
        if lfpullsource:
            path = ui.expandpath(lfpullsource)
        elif put:
            path = ui.expandpath('default-push', 'default')
        else:
            path = ui.expandpath('default')

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fallback to the empty string,
        # meaning the current directory.
        if path == 'default-push' or path == 'default':
            path = ''
            remote = repo
        else:
            path, _branches = hg.parseurl(path)
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if match:
        scheme = match.group(1)
    else:
        # regular filesystem path
        scheme = 'file'

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise error.Abort(_('unsupported URL scheme %r') % scheme)

    # try each provider for this scheme; a provider refuses by raising
    # storeprotonotcapable
    for classobj in storeproviders:
        try:
            return classobj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    raise error.Abort(_('%s does not appear to be a largefile store') %
                      util.hidepassword(path))
70
# map URL scheme -> ordered list of candidate store classes for that scheme
_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
}

# extract the scheme prefix (e.g. 'https') from a URL, if present
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
@@ -1,183 +1,181
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ import_checker="$TESTDIR"/../contrib/import-checker.py
4 $ import_checker="$TESTDIR"/../contrib/import-checker.py
5
5
6 Run the doctests from the import checker, and make sure
6 Run the doctests from the import checker, and make sure
7 it's working correctly.
7 it's working correctly.
8 $ TERM=dumb
8 $ TERM=dumb
9 $ export TERM
9 $ export TERM
10 $ python -m doctest $import_checker
10 $ python -m doctest $import_checker
11
11
12 Run additional tests for the import checker
12 Run additional tests for the import checker
13
13
14 $ mkdir testpackage
14 $ mkdir testpackage
15 $ touch testpackage/__init__.py
15 $ touch testpackage/__init__.py
16
16
17 $ cat > testpackage/multiple.py << EOF
17 $ cat > testpackage/multiple.py << EOF
18 > from __future__ import absolute_import
18 > from __future__ import absolute_import
19 > import os, sys
19 > import os, sys
20 > EOF
20 > EOF
21
21
22 $ cat > testpackage/unsorted.py << EOF
22 $ cat > testpackage/unsorted.py << EOF
23 > from __future__ import absolute_import
23 > from __future__ import absolute_import
24 > import sys
24 > import sys
25 > import os
25 > import os
26 > EOF
26 > EOF
27
27
28 $ cat > testpackage/stdafterlocal.py << EOF
28 $ cat > testpackage/stdafterlocal.py << EOF
29 > from __future__ import absolute_import
29 > from __future__ import absolute_import
30 > from . import unsorted
30 > from . import unsorted
31 > import os
31 > import os
32 > EOF
32 > EOF
33
33
34 $ cat > testpackage/requirerelative.py << EOF
34 $ cat > testpackage/requirerelative.py << EOF
35 > from __future__ import absolute_import
35 > from __future__ import absolute_import
36 > import testpackage.unsorted
36 > import testpackage.unsorted
37 > EOF
37 > EOF
38
38
39 $ cat > testpackage/importalias.py << EOF
39 $ cat > testpackage/importalias.py << EOF
40 > from __future__ import absolute_import
40 > from __future__ import absolute_import
41 > import ui
41 > import ui
42 > EOF
42 > EOF
43
43
44 $ cat > testpackage/relativestdlib.py << EOF
44 $ cat > testpackage/relativestdlib.py << EOF
45 > from __future__ import absolute_import
45 > from __future__ import absolute_import
46 > from .. import os
46 > from .. import os
47 > EOF
47 > EOF
48
48
49 $ cat > testpackage/symbolimport.py << EOF
49 $ cat > testpackage/symbolimport.py << EOF
50 > from __future__ import absolute_import
50 > from __future__ import absolute_import
51 > from .unsorted import foo
51 > from .unsorted import foo
52 > EOF
52 > EOF
53
53
54 $ cat > testpackage/latesymbolimport.py << EOF
54 $ cat > testpackage/latesymbolimport.py << EOF
55 > from __future__ import absolute_import
55 > from __future__ import absolute_import
56 > from . import unsorted
56 > from . import unsorted
57 > from mercurial.node import hex
57 > from mercurial.node import hex
58 > EOF
58 > EOF
59
59
60 $ cat > testpackage/multiplegroups.py << EOF
60 $ cat > testpackage/multiplegroups.py << EOF
61 > from __future__ import absolute_import
61 > from __future__ import absolute_import
62 > from . import unsorted
62 > from . import unsorted
63 > from . import more
63 > from . import more
64 > EOF
64 > EOF
65
65
66 $ mkdir testpackage/subpackage
66 $ mkdir testpackage/subpackage
67 $ cat > testpackage/subpackage/levelpriority.py << EOF
67 $ cat > testpackage/subpackage/levelpriority.py << EOF
68 > from __future__ import absolute_import
68 > from __future__ import absolute_import
69 > from . import foo
69 > from . import foo
70 > from .. import parent
70 > from .. import parent
71 > EOF
71 > EOF
72
72
73 $ touch testpackage/subpackage/foo.py
73 $ touch testpackage/subpackage/foo.py
74 $ cat > testpackage/subpackage/__init__.py << EOF
74 $ cat > testpackage/subpackage/__init__.py << EOF
75 > from __future__ import absolute_import
75 > from __future__ import absolute_import
76 > from . import levelpriority # should not cause cycle
76 > from . import levelpriority # should not cause cycle
77 > EOF
77 > EOF
78
78
79 $ cat > testpackage/subpackage/localimport.py << EOF
79 $ cat > testpackage/subpackage/localimport.py << EOF
80 > from __future__ import absolute_import
80 > from __future__ import absolute_import
81 > from . import foo
81 > from . import foo
82 > def bar():
82 > def bar():
83 > # should not cause "higher-level import should come first"
83 > # should not cause "higher-level import should come first"
84 > from .. import unsorted
84 > from .. import unsorted
85 > # but other errors should be detected
85 > # but other errors should be detected
86 > from .. import more
86 > from .. import more
87 > import testpackage.subpackage.levelpriority
87 > import testpackage.subpackage.levelpriority
88 > EOF
88 > EOF
89
89
90 $ cat > testpackage/importmodulefromsub.py << EOF
90 $ cat > testpackage/importmodulefromsub.py << EOF
91 > from __future__ import absolute_import
91 > from __future__ import absolute_import
92 > from .subpackage import foo # not a "direct symbol import"
92 > from .subpackage import foo # not a "direct symbol import"
93 > EOF
93 > EOF
94
94
95 $ cat > testpackage/importsymbolfromsub.py << EOF
95 $ cat > testpackage/importsymbolfromsub.py << EOF
96 > from __future__ import absolute_import
96 > from __future__ import absolute_import
97 > from .subpackage import foo, nonmodule
97 > from .subpackage import foo, nonmodule
98 > EOF
98 > EOF
99
99
100 $ cat > testpackage/sortedentries.py << EOF
100 $ cat > testpackage/sortedentries.py << EOF
101 > from __future__ import absolute_import
101 > from __future__ import absolute_import
102 > from . import (
102 > from . import (
103 > foo,
103 > foo,
104 > bar,
104 > bar,
105 > )
105 > )
106 > EOF
106 > EOF
107
107
108 $ cat > testpackage/importfromalias.py << EOF
108 $ cat > testpackage/importfromalias.py << EOF
109 > from __future__ import absolute_import
109 > from __future__ import absolute_import
110 > from . import ui
110 > from . import ui
111 > EOF
111 > EOF
112
112
113 $ cat > testpackage/importfromrelative.py << EOF
113 $ cat > testpackage/importfromrelative.py << EOF
114 > from __future__ import absolute_import
114 > from __future__ import absolute_import
115 > from testpackage.unsorted import foo
115 > from testpackage.unsorted import foo
116 > EOF
116 > EOF
117
117
118 $ mkdir testpackage2
118 $ mkdir testpackage2
119 $ touch testpackage2/__init__.py
119 $ touch testpackage2/__init__.py
120
120
121 $ cat > testpackage2/latesymbolimport.py << EOF
121 $ cat > testpackage2/latesymbolimport.py << EOF
122 > from __future__ import absolute_import
122 > from __future__ import absolute_import
123 > from testpackage import unsorted
123 > from testpackage import unsorted
124 > from mercurial.node import hex
124 > from mercurial.node import hex
125 > EOF
125 > EOF
126
126
127 $ python "$import_checker" testpackage*/*.py testpackage/subpackage/*.py
127 $ python "$import_checker" testpackage*/*.py testpackage/subpackage/*.py
128 testpackage/importalias.py:2: ui module must be "as" aliased to uimod
128 testpackage/importalias.py:2: ui module must be "as" aliased to uimod
129 testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod
129 testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod
130 testpackage/importfromrelative.py:2: import should be relative: testpackage.unsorted
130 testpackage/importfromrelative.py:2: import should be relative: testpackage.unsorted
131 testpackage/importfromrelative.py:2: direct symbol import foo from testpackage.unsorted
131 testpackage/importfromrelative.py:2: direct symbol import foo from testpackage.unsorted
132 testpackage/importsymbolfromsub.py:2: direct symbol import nonmodule from testpackage.subpackage
132 testpackage/importsymbolfromsub.py:2: direct symbol import nonmodule from testpackage.subpackage
133 testpackage/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
133 testpackage/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
134 testpackage/multiple.py:2: multiple imported names: os, sys
134 testpackage/multiple.py:2: multiple imported names: os, sys
135 testpackage/multiplegroups.py:3: multiple "from . import" statements
135 testpackage/multiplegroups.py:3: multiple "from . import" statements
136 testpackage/relativestdlib.py:2: relative import of stdlib module
136 testpackage/relativestdlib.py:2: relative import of stdlib module
137 testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted
137 testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted
138 testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo
138 testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo
139 testpackage/stdafterlocal.py:3: stdlib import "os" follows local import: testpackage
139 testpackage/stdafterlocal.py:3: stdlib import "os" follows local import: testpackage
140 testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage
140 testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage
141 testpackage/subpackage/localimport.py:7: multiple "from .. import" statements
141 testpackage/subpackage/localimport.py:7: multiple "from .. import" statements
142 testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority
142 testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority
143 testpackage/symbolimport.py:2: direct symbol import foo from testpackage.unsorted
143 testpackage/symbolimport.py:2: direct symbol import foo from testpackage.unsorted
144 testpackage/unsorted.py:3: imports not lexically sorted: os < sys
144 testpackage/unsorted.py:3: imports not lexically sorted: os < sys
145 testpackage2/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
145 testpackage2/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node
146 [1]
146 [1]
147
147
148 $ cd "$TESTDIR"/..
148 $ cd "$TESTDIR"/..
149
149
150 There are a handful of cases here that require renaming a module so it
150 There are a handful of cases here that require renaming a module so it
151 doesn't overlap with a stdlib module name. There are also some cycles
151 doesn't overlap with a stdlib module name. There are also some cycles
152 here that we should still endeavor to fix, and some cycles will be
152 here that we should still endeavor to fix, and some cycles will be
153 hidden by deduplication algorithm in the cycle detector, so fixing
153 hidden by deduplication algorithm in the cycle detector, so fixing
154 these may expose other cycles.
154 these may expose other cycles.
155
155
156 Known-bad files are excluded by -X as some of them would produce unstable
156 Known-bad files are excluded by -X as some of them would produce unstable
157 outputs, which should be fixed later.
157 outputs, which should be fixed later.
158
158
159 $ hg locate 'set:**.py or grep(r"^#!.*?python")' \
159 $ hg locate 'set:**.py or grep(r"^#!.*?python")' \
160 > 'tests/**.t' \
160 > 'tests/**.t' \
161 > -X contrib/debugshell.py \
161 > -X contrib/debugshell.py \
162 > -X contrib/win32/hgwebdir_wsgi.py \
162 > -X contrib/win32/hgwebdir_wsgi.py \
163 > -X doc/gendoc.py \
163 > -X doc/gendoc.py \
164 > -X doc/hgmanpage.py \
164 > -X doc/hgmanpage.py \
165 > -X i18n/posplit \
165 > -X i18n/posplit \
166 > -X tests/test-hgweb-auth.py \
166 > -X tests/test-hgweb-auth.py \
167 > -X tests/hypothesishelpers.py \
167 > -X tests/hypothesishelpers.py \
168 > -X tests/test-ctxmanager.py \
168 > -X tests/test-ctxmanager.py \
169 > -X tests/test-lock.py \
169 > -X tests/test-lock.py \
170 > -X tests/test-verify-repo-operations.py \
170 > -X tests/test-verify-repo-operations.py \
171 > -X tests/test-hook.t \
171 > -X tests/test-hook.t \
172 > -X tests/test-import.t \
172 > -X tests/test-import.t \
173 > -X tests/test-check-module-imports.t \
173 > -X tests/test-check-module-imports.t \
174 > -X tests/test-commit-interactive.t \
174 > -X tests/test-commit-interactive.t \
175 > -X tests/test-contrib-check-code.t \
175 > -X tests/test-contrib-check-code.t \
176 > -X tests/test-extension.t \
176 > -X tests/test-extension.t \
177 > -X tests/test-hghave.t \
177 > -X tests/test-hghave.t \
178 > -X tests/test-hgweb-no-path-info.t \
178 > -X tests/test-hgweb-no-path-info.t \
179 > -X tests/test-hgweb-no-request-uri.t \
179 > -X tests/test-hgweb-no-request-uri.t \
180 > -X tests/test-hgweb-non-interactive.t \
180 > -X tests/test-hgweb-non-interactive.t \
181 > | sed 's-\\-/-g' | python "$import_checker" -
181 > | sed 's-\\-/-g' | python "$import_checker" -
182 Import cycle: hgext.largefiles.basestore -> hgext.largefiles.localstore -> hgext.largefiles.basestore
183 [1]
General Comments 0
You need to be logged in to leave comments. Login now