largefiles: specify unit for ui.progress when operating on files...
av6
r28463:19b4a208 default
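The change is one line in each file below: the hard-coded unit strings
'lfile' and 'largefile' passed to ui.progress() become the translatable
_('files'), so the progress bar's count reads e.g. "3/10 files" and can be
localized. A minimal sketch of the calling pattern, assuming the
ui.progress(topic, pos, item='', unit='', total=None) API of this Mercurial
vintage (`fetch` is a hypothetical per-file callback, not part of the patch):

    from mercurial.i18n import _

    def getall(ui, files, fetch):
        # files is a list of (filename, hash) tuples
        for at, (filename, hash) in enumerate(files):
            # unit names the thing being counted; wrapping it in _() lets
            # translators localize the "3/10 files" suffix of the bar
            ui.progress(_('getting largefiles'), at, unit=_('files'),
                        total=len(files))
            fetch(filename, hash)
        # pos=None marks the topic as finished and clears the bar
        ui.progress(_('getting largefiles'), None)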
hgext/largefiles/basestore.py
@@ -1,221 +1,221 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''base class for store implementations and store-related utility code'''

import re

from mercurial import util, node, hg, error
from mercurial.i18n import _

import lfutil

class StoreError(Exception):
    '''Raised when there is a problem getting files from or putting
    files to a central store.'''
    def __init__(self, filename, hash, url, detail):
        self.filename = filename
        self.hash = hash
        self.url = url
        self.detail = detail

    def longmessage(self):
        return (_("error getting id %s from url %s for file %s: %s\n") %
                (self.hash, util.hidepassword(self.url), self.filename,
                 self.detail))

    def __str__(self):
        return "%s: %s" % (util.hidepassword(self.url), self.detail)

class basestore(object):
    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store so it can be retrieved by hash.'''
        raise NotImplementedError('abstract method')

    def exists(self, hashes):
        '''Check to see if the store contains the given hashes. Given an
        iterable of hashes it returns a mapping from hash to bool.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfully
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        available = self.exists(set(hash for (_filename, hash) in files))
        for filename, hash in files:
-            ui.progress(_('getting largefiles'), at, unit='lfile',
+            ui.progress(_('getting largefiles'), at, unit=_('files'),
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            if not available.get(hash):
                ui.warn(_('%s: largefile %s not available from %s\n')
                        % (filename, hash, util.hidepassword(self.url)))
                missing.append(filename)
                continue

            if self._gethash(filename, hash):
                success.append((filename, hash))
            else:
                missing.append(filename)

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def _gethash(self, filename, hash):
        """Get file with the provided hash and store it in the local repo's
        store and in the usercache.
        filename is for informational messages only.
        """
        util.makedirs(lfutil.storepath(self.repo, ''))
        storefilename = lfutil.storepath(self.repo, hash)

        tmpname = storefilename + '.tmp'
        tmpfile = util.atomictempfile(tmpname,
                                      createmode=self.repo.store.createmode)

        try:
            gothash = self._getfile(tmpfile, filename, hash)
        except StoreError as err:
            self.ui.warn(err.longmessage())
            gothash = ""
        tmpfile.close()

        if gothash != hash:
            if gothash != "":
                self.ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, gothash))
            util.unlink(tmpname)
            return False

        util.rename(tmpname, storefilename)
        lfutil.linktousercache(self.repo, hash)
        return True

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''
        failed = False

        self.ui.status(_('searching %d changesets for largefiles\n') %
                       len(revs))
        verified = set() # set of (filename, filenode) tuples

        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            for standin in cctx:
                if self._verifyfile(cctx, cset, contents, standin, verified):
                    failed = True

        numrevs = len(verified)
        numlfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            self.ui.status(
                _('verified contents of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        else:
            self.ui.status(
                _('verified existence of %d revisions of %d largefiles\n')
                % (numrevs, numlfiles))
        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile. Compute the hash of the file on-the-fly as it
        downloads and return the hash. Close tmpfile. Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        '''Perform the actual verification of a file in the store.
        'cset' is only used in warnings.
        'contents' controls verification of content hash.
        'standin' is the standin path of the largefile to verify.
        'verified' is maintained as a set of already verified files.
        Returns _true_ if it is a standin and any problems are found!
        '''
        raise NotImplementedError('abstract method')

import localstore, wirestore

_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
    }

_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
    ui = repo.ui

    if not remote:
        lfpullsource = getattr(repo, 'lfpullsource', None)
        if lfpullsource:
            path = ui.expandpath(lfpullsource)
        elif put:
            path = ui.expandpath('default-push', 'default')
        else:
            path = ui.expandpath('default')

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fallback to the empty string,
        # meaning the current directory.
        if path == 'default-push' or path == 'default':
            path = ''
            remote = repo
        else:
            path, _branches = hg.parseurl(path)
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if not match: # regular filesystem path
        scheme = 'file'
    else:
        scheme = match.group(1)

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise error.Abort(_('unsupported URL scheme %r') % scheme)

    for classobj in storeproviders:
        try:
            return classobj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    raise error.Abort(_('%s does not appear to be a largefile store') %
                      util.hidepassword(path))
hgext/largefiles/lfcommands.py
@@ -1,544 +1,544 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''High-level command function for lfconvert, plus the cmdtable.'''

import os, errno
import shutil

from mercurial import util, match as match_, hg, node, context, error, \
    cmdutil, scmutil, commands
from mercurial.i18n import _
from mercurial.lock import release

from hgext.convert import convcmd
from hgext.convert import filemap

import lfutil
import basestore

# -- Commands ----------------------------------------------------------

cmdtable = {}
command = cmdutil.command(cmdtable)

@command('lfconvert',
    [('s', 'size', '',
      _('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
    ('', 'to-normal', False,
     _('convert from a largefiles repo to a normal repo')),
    ],
    _('hg lfconvert SOURCE DEST [FILE ...]'),
    norepo=True,
    inferrepo=True)
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise error.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise error.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    dstwlock = dstlock = None
    try:
        # Get a list of all changesets in the source. The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            # Lock destination to prevent modification while it is converted to.
            # Don't need to lock src because we are just reading from its
            # history which can't change.
            dstwlock = rdst.wlock()
            dstlock = rdst.lock()

            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = '.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (not realname.startswith(lfutil.shortnameslash)
                            or f[0] is None):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(_("missing largefile for '%s' in %s")
                                          % (realname, realrev))
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(ui, src, dest, revmapfile,
                                                    opts)

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_("all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(ui, src, dest)
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)

def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise error.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = 'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                      'l' in fctx.flags(), 'x' in fctx.flags(),
                                      renamed)
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)

def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()

# Generate list of changed files
def _getchangedfiles(ctx, parents):
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)
    return files

# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)
    return parents

# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)

# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            ui.warn(_('skipping incorrectly formatted tag %s\n')
                % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n')
                % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)

def _islfile(file, ctx, matcher, size):
    '''Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size.'''
    # never store special .hg* files as largefiles
    if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False

def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug("sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    files = filter(lambda h: not retval[h], files)
    ui.debug("%d largefiles need to be uploaded\n" % len(files))

    for hash in files:
-        ui.progress(_('uploading largefiles'), at, unit='largefile',
+        ui.progress(_('uploading largefiles'), at, unit=_('files'),
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise error.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)

def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    if all:
        revs = repo.revs('all()')
    else:
        revs = ['.']

    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)

def cachelfiles(ui, repo, node, filelist=None):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)
    toget = []

    for lfile in lfiles:
        try:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])

def downloadlfiles(ui, repo, rev=None):
    matchfn = scmutil.match(repo[None],
                            [repo.wjoin(lfutil.shortname)], {})
    def prepare(ctx, fns):
        pass
    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing

def updatelfiles(ui, repo, filelist=None, printmessage=None,
                 normallookup=False):
    '''Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    '''
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        update = {}
        updated, removed = 0, 0
        for lfile in lfiles:
            abslfile = repo.wjoin(lfile)
            abslfileorig = scmutil.origpath(ui, repo, abslfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            absstandinorig = scmutil.origpath(ui, repo, absstandin)
            if os.path.exists(absstandin):
                if (os.path.exists(absstandinorig) and
                        os.path.exists(abslfile)):
                    shutil.copyfile(abslfile, abslfileorig)
                    util.unlinkpath(absstandinorig)
                expecthash = lfutil.readstandin(repo, lfile)
                if expecthash != '':
                    if lfile not in repo[None]: # not switched to normal file
                        util.unlinkpath(abslfile, ignoremissing=True)
                    # use normallookup() to allocate an entry in largefiles
                    # dirstate to prevent lfilesrepo.status() from reporting
                    # missing files as removed.
                    lfdirstate.normallookup(lfile)
                    update[lfile] = expecthash
            else:
                # Remove lfiles for which the standin is deleted, unless the
                # lfile is added to the repository again. This happens when a
                # largefile is converted back to a normal file: the standin
                # disappears, but a new (normal) file appears as the lfile.
                if (os.path.exists(abslfile) and
                        repo.dirstate.normalize(lfile) not in repo[None]):
                    util.unlinkpath(abslfile)
                    removed += 1

        # largefile processing might be slow and be interrupted - be prepared
        lfdirstate.write()

        if lfiles:
            statuswriter(_('getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

        for lfile in lfiles:
            update1 = 0

            expecthash = update.get(lfile)
            if expecthash:
                if not lfutil.copyfromcache(repo, expecthash, lfile):
                    # failed ... but already removed and set to normallookup
                    continue
                # Synchronize largefile dirstate to the last modified
                # time of the file
                lfdirstate.normal(lfile)
                update1 = 1

            # copy the state of largefile standin from the repository's
            # dirstate to its state in the lfdirstate.
            abslfile = repo.wjoin(lfile)
            absstandin = repo.wjoin(lfutil.standin(lfile))
            if os.path.exists(absstandin):
                mode = os.stat(absstandin).st_mode
                if mode != os.stat(abslfile).st_mode:
                    os.chmod(abslfile, mode)
                    update1 = 1

            updated += update1

            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write()
        if lfiles:
            statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
                removed))

@command('lfpull',
    [('r', 'rev', [], _('pull largefiles for these revisions'))
    ] + commands.remoteopts,
    _('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def lfpull(ui, repo, source="default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_('no revisions specified'))
    revs = scmutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_('pulling largefiles for revision %s\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_("%d largefiles cached\n") % numcached)
tests/test-largefiles-wireproto.t
@@ -1,309 +1,309 @@
1 This file contains testcases that tend to be related to the wire protocol part
1 This file contains testcases that tend to be related to the wire protocol part
2 of largefiles.
2 of largefiles.
3
3
4 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
4 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
5 $ mkdir "${USERCACHE}"
5 $ mkdir "${USERCACHE}"
6 $ cat >> $HGRCPATH <<EOF
6 $ cat >> $HGRCPATH <<EOF
7 > [extensions]
7 > [extensions]
8 > largefiles=
8 > largefiles=
9 > purge=
9 > purge=
10 > rebase=
10 > rebase=
11 > transplant=
11 > transplant=
12 > [phases]
12 > [phases]
13 > publish=False
13 > publish=False
14 > [largefiles]
14 > [largefiles]
15 > minsize=2
15 > minsize=2
16 > patterns=glob:**.dat
16 > patterns=glob:**.dat
17 > usercache=${USERCACHE}
17 > usercache=${USERCACHE}
18 > [web]
18 > [web]
19 > allow_archive = zip
19 > allow_archive = zip
20 > [hooks]
20 > [hooks]
21 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
21 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
22 > EOF
22 > EOF
23
23
24
24
25 #if serve
25 #if serve
26 vanilla clients not locked out from largefiles servers on vanilla repos
26 vanilla clients not locked out from largefiles servers on vanilla repos
27 $ mkdir r1
27 $ mkdir r1
28 $ cd r1
28 $ cd r1
29 $ hg init
29 $ hg init
30 $ echo c1 > f1
30 $ echo c1 > f1
31 $ hg add f1
31 $ hg add f1
32 $ hg commit -m "m1"
32 $ hg commit -m "m1"
33 Invoking status precommit hook
33 Invoking status precommit hook
34 A f1
34 A f1
35 $ cd ..
35 $ cd ..
36 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
36 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
37 $ cat hg.pid >> $DAEMON_PIDS
37 $ cat hg.pid >> $DAEMON_PIDS
38 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
38 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
39 requesting all changes
39 requesting all changes
40 adding changesets
40 adding changesets
41 adding manifests
41 adding manifests
42 adding file changes
42 adding file changes
43 added 1 changesets with 1 changes to 1 files
43 added 1 changesets with 1 changes to 1 files
44 updating to branch default
44 updating to branch default
45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46
46
47 largefiles clients still work with vanilla servers
47 largefiles clients still work with vanilla servers
48 $ hg --config extensions.largefiles=! serve -R r1 -d -p $HGPORT1 --pid-file hg.pid
48 $ hg --config extensions.largefiles=! serve -R r1 -d -p $HGPORT1 --pid-file hg.pid
49 $ cat hg.pid >> $DAEMON_PIDS
49 $ cat hg.pid >> $DAEMON_PIDS
50 $ hg clone http://localhost:$HGPORT1 r3
50 $ hg clone http://localhost:$HGPORT1 r3
51 requesting all changes
51 requesting all changes
52 adding changesets
52 adding changesets
53 adding manifests
53 adding manifests
54 adding file changes
54 adding file changes
55 added 1 changesets with 1 changes to 1 files
55 added 1 changesets with 1 changes to 1 files
56 updating to branch default
56 updating to branch default
57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 #endif
58 #endif
59
59
60 vanilla clients locked out from largefiles http repos
60 vanilla clients locked out from largefiles http repos
61 $ mkdir r4
61 $ mkdir r4
62 $ cd r4
62 $ cd r4
63 $ hg init
63 $ hg init
64 $ echo c1 > f1
64 $ echo c1 > f1
65 $ hg add --large f1
65 $ hg add --large f1
66 $ hg commit -m "m1"
66 $ hg commit -m "m1"
67 Invoking status precommit hook
67 Invoking status precommit hook
68 A f1
68 A f1
69 $ cd ..
69 $ cd ..
70
70
71 largefiles can be pushed locally (issue3583)
71 largefiles can be pushed locally (issue3583)
72 $ hg init dest
72 $ hg init dest
73 $ cd r4
73 $ cd r4
74 $ hg outgoing ../dest
74 $ hg outgoing ../dest
75 comparing with ../dest
75 comparing with ../dest
76 searching for changes
76 searching for changes
77 changeset: 0:639881c12b4c
77 changeset: 0:639881c12b4c
78 tag: tip
78 tag: tip
79 user: test
79 user: test
80 date: Thu Jan 01 00:00:00 1970 +0000
80 date: Thu Jan 01 00:00:00 1970 +0000
81 summary: m1
81 summary: m1
82
82
83 $ hg push ../dest
83 $ hg push ../dest
84 pushing to ../dest
84 pushing to ../dest
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 1 changesets with 1 changes to 1 files
89 added 1 changesets with 1 changes to 1 files
90
90
91 exit code with nothing outgoing (issue3611)
91 exit code with nothing outgoing (issue3611)
92 $ hg outgoing ../dest
92 $ hg outgoing ../dest
93 comparing with ../dest
93 comparing with ../dest
94 searching for changes
94 searching for changes
95 no changes found
95 no changes found
96 [1]
96 [1]
97 $ cd ..
97 $ cd ..

#if serve
  $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid
  $ cat hg.pid >> $DAEMON_PIDS
  $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5
  abort: remote error:

  This repository uses the largefiles extension.

  Please enable it in your Mercurial config file.
  [255]

used all HGPORTs, kill all daemons
  $ killdaemons.py
#endif

vanilla clients locked out from largefiles ssh repos
  $ hg --config extensions.largefiles=! clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
  remote:
  remote: This repository uses the largefiles extension.
  remote:
  remote: Please enable it in your Mercurial config file.
  remote:
  remote: -
  abort: remote error
  (check previous remote output)
  [255]
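
For reference (not part of the recorded test), the fix on the vanilla client
side is simply to enable the extension in its Mercurial configuration file,
e.g. in ~/.hgrc:

[extensions]
largefiles =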

#if serve

largefiles clients refuse to push largefiles repos to vanilla servers
  $ mkdir r6
  $ cd r6
  $ hg init
  $ echo c1 > f1
  $ hg add f1
  $ hg commit -m "m1"
  Invoking status precommit hook
  A f1
  $ cat >> .hg/hgrc <<!
  > [web]
  > push_ssl = false
  > allow_push = *
  > !
  $ cd ..
  $ hg clone r6 r7
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd r7
  $ echo c2 > f2
  $ hg add --large f2
  $ hg commit -m "m2"
  Invoking status precommit hook
  A f2
  $ hg --config extensions.largefiles=! -R ../r6 serve -d -p $HGPORT --pid-file ../hg.pid
  $ cat ../hg.pid >> $DAEMON_PIDS
  $ hg push http://localhost:$HGPORT
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: http://localhost:$HGPORT/ does not appear to be a largefile store
  [255]
  $ cd ..

putlfile errors are shown (issue3123)
Corrupt the cached largefile in r7 and move it out of the server's usercache
  $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 .
  $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
  $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8"
  $ hg init empty
  $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \
  > --config 'web.allow_push=*' --config web.push_ssl=False
  $ cat hg.pid >> $DAEMON_PIDS
  $ hg push -R r7 http://localhost:$HGPORT1
  pushing to http://localhost:$HGPORT1/
  searching for changes
  remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash
  abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/ (glob)
  [255]
  $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
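
(A minimal sketch of the integrity check behind "largefile contents do not
match hash", assuming only that largefiles are stored under the SHA-1 hex
digest of their contents; check_largefile is a hypothetical helper, not
Mercurial's actual API:)

import hashlib

def check_largefile(path, expected_hash):
    # Blobs are stored under the SHA-1 hex digest of their contents, so
    # corruption is detected by rehashing and comparing against the name.
    with open(path, 'rb') as f:
        return hashlib.sha1(f.read()).hexdigest() == expected_hash
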
Push of a file that exists on the server but is corrupted there - magic healing would be nice ... but too magic
  $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
  $ hg push -R r7 http://localhost:$HGPORT1
  pushing to http://localhost:$HGPORT1/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 2 changesets with 2 changes to 2 files
  $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
  server side corruption
  $ rm -rf empty

Push a largefiles repository to a served empty repository
  $ hg init r8
  $ echo c3 > r8/f1
  $ hg add --large r8/f1 -R r8
  $ hg commit -m "m1" -R r8
  Invoking status precommit hook
  A f1
  $ hg init empty
  $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \
  > --config 'web.allow_push=*' --config web.push_ssl=False
  $ cat hg.pid >> $DAEMON_PIDS
  $ rm "${USERCACHE}"/*
  $ hg push -R r8 http://localhost:$HGPORT2/#default
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
  $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
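
(The two file checks above confirm that a push populates both the served
repository's local largefile store and the pushing user's cache. The cache
location is controlled by the largefiles.usercache setting, which this test
overrides further down; the path below is only an example:)

[largefiles]
usercache = /path/to/shared/cache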

Clone over http, no largefiles pulled on clone.

  $ hg clone http://localhost:$HGPORT2/#default http-clone -U
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files

Archive contains largefiles
  >>> import urllib2, os
  >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
  >>> with open('archive.zip', 'w') as f:
  ...     f.write(urllib2.urlopen(u).read())
  $ unzip -t archive.zip
  Archive: archive.zip
  testing: empty-default/.hg_archival.txt OK
  testing: empty-default/f1 OK
  No errors detected in compressed data of archive.zip.

test 'verify' with remotestore:

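(Background on the flags used below, as documented for the extension:
--large makes verify consider largefiles at all, --lfa checks the
largefiles of every revision instead of only those currently referenced,
and --lfc additionally verifies file contents against their hashes rather
than mere existence. The most thorough form is therefore:)

hg verify --large --lfa --lfc
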
  $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90
  $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
  $ hg -R http-clone verify --large --lfa
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  1 files, 1 changesets, 1 total revisions
  searching 1 changesets for largefiles
  changeset 0:cf03e5bb9936: f1 missing
  verified existence of 1 revisions of 1 largefiles
  [1]
  $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
  $ hg -R http-clone -q verify --large --lfa

largefiles pulled on update - a largefile missing on the server:
  $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
  $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
  getting changed largefiles
  f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/
  0 largefiles updated, 0 removed
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R http-clone st
  ! f1
  $ hg -R http-clone up -Cqr null

largefiles pulled on update - a largefile corrupted on the server:
  $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
  $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
  getting changed largefiles
  f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
  0 largefiles updated, 0 removed
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R http-clone st
  ! f1
  $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
  $ [ ! -f http-clone/f1 ]
  $ [ ! -f http-clone-usercache ]
  $ hg -R http-clone verify --large --lfc
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  1 files, 1 changesets, 1 total revisions
  searching 1 changesets for largefiles
  verified contents of 1 revisions of 1 largefiles
  $ hg -R http-clone up -Cqr null

largefiles pulled on update - no server side problems:
  $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
  $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true
  resolving manifests
   branchmerge: False, force: False, partial: False
   ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
   .hglf/f1: remote created -> g
  getting .hglf/f1
  updating: .hglf/f1 1/1 files (100.00%)
  getting changed largefiles
  using http://localhost:$HGPORT2/
  sending capabilities command
  sending batch command
  getting largefiles: 0/1 files (0.00%)
  getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
  sending getlfile command
  found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
  1 largefiles updated, 0 removed
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
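
(The "getting largefiles: 0/1 files" line above reflects this changeset:
the progress call now passes an explicit unit. A minimal self-contained
sketch of that call pattern against the ui.progress() API of this era;
the loop and names are illustrative, not the extension's actual code:)

def getlfiles(ui, _, files):
    total = len(files)
    for at, f in enumerate(files):
        # topic, position, then an explicit unit and the known total
        ui.progress(_('getting largefiles'), at, unit=_('files'),
                    total=total)
        # ... download f here ...
    ui.progress(_('getting largefiles'), None)  # close the progress topic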

  $ ls http-clone-usercache/*
  http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90

  $ rm -rf empty http-clone*

used all HGPORTs, kill all daemons
  $ killdaemons.py

#endif