##// END OF EJS Templates
largefiles: improve comments, internal docstrings...
Greg Ward -
r15252:6e809bb4 default
parent child Browse files
Show More
@@ -1,201 +1,202 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Base class for store implementations and store-related utility code.'''
9 '''base class for store implementations and store-related utility code'''
10
10
11 import os
11 import os
12 import tempfile
12 import tempfile
13 import binascii
13 import binascii
14 import re
14 import re
15
15
16 from mercurial import util, node, hg
16 from mercurial import util, node, hg
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 import lfutil
19 import lfutil
20
20
21 class StoreError(Exception):
21 class StoreError(Exception):
22 '''Raised when there is a problem getting files from or putting
22 '''Raised when there is a problem getting files from or putting
23 files to a central store.'''
23 files to a central store.'''
24 def __init__(self, filename, hash, url, detail):
24 def __init__(self, filename, hash, url, detail):
25 self.filename = filename
25 self.filename = filename
26 self.hash = hash
26 self.hash = hash
27 self.url = url
27 self.url = url
28 self.detail = detail
28 self.detail = detail
29
29
30 def longmessage(self):
30 def longmessage(self):
31 if self.url:
31 if self.url:
32 return ('%s: %s\n'
32 return ('%s: %s\n'
33 '(failed URL: %s)\n'
33 '(failed URL: %s)\n'
34 % (self.filename, self.detail, self.url))
34 % (self.filename, self.detail, self.url))
35 else:
35 else:
36 return ('%s: %s\n'
36 return ('%s: %s\n'
37 '(no default or default-push path set in hgrc)\n'
37 '(no default or default-push path set in hgrc)\n'
38 % (self.filename, self.detail))
38 % (self.filename, self.detail))
39
39
40 def __str__(self):
40 def __str__(self):
41 return "%s: %s" % (self.url, self.detail)
41 return "%s: %s" % (self.url, self.detail)
42
42
43 class basestore(object):
43 class basestore(object):
44 def __init__(self, ui, repo, url):
44 def __init__(self, ui, repo, url):
45 self.ui = ui
45 self.ui = ui
46 self.repo = repo
46 self.repo = repo
47 self.url = url
47 self.url = url
48
48
49 def put(self, source, hash):
49 def put(self, source, hash):
50 '''Put source file into the store under <filename>/<hash>.'''
50 '''Put source file into the store under <filename>/<hash>.'''
51 raise NotImplementedError('abstract method')
51 raise NotImplementedError('abstract method')
52
52
53 def exists(self, hash):
53 def exists(self, hash):
54 '''Check to see if the store contains the given hash.'''
54 '''Check to see if the store contains the given hash.'''
55 raise NotImplementedError('abstract method')
55 raise NotImplementedError('abstract method')
56
56
57 def get(self, files):
57 def get(self, files):
58 '''Get the specified largefiles from the store and write to local
58 '''Get the specified largefiles from the store and write to local
59 files under repo.root. files is a list of (filename, hash)
59 files under repo.root. files is a list of (filename, hash)
60 tuples. Return (success, missing), lists of files successfully
60 tuples. Return (success, missing), lists of files successfully
61 downloaded and those not found in the store. success is a list
61 downloaded and those not found in the store. success is a list
62 of (filename, hash) tuples; missing is a list of filenames that
62 of (filename, hash) tuples; missing is a list of filenames that
63 we could not get. (The detailed error message will already have
63 we could not get. (The detailed error message will already have
64 been presented to the user, so missing is just supplied as a
64 been presented to the user, so missing is just supplied as a
65 summary.)'''
65 summary.)'''
66 success = []
66 success = []
67 missing = []
67 missing = []
68 ui = self.ui
68 ui = self.ui
69
69
70 at = 0
70 at = 0
71 for filename, hash in files:
71 for filename, hash in files:
72 ui.progress(_('getting largefiles'), at, unit='lfile',
72 ui.progress(_('getting largefiles'), at, unit='lfile',
73 total=len(files))
73 total=len(files))
74 at += 1
74 at += 1
75 ui.note(_('getting %s:%s\n') % (filename, hash))
75 ui.note(_('getting %s:%s\n') % (filename, hash))
76
76
77 cachefilename = lfutil.cachepath(self.repo, hash)
77 cachefilename = lfutil.cachepath(self.repo, hash)
78 cachedir = os.path.dirname(cachefilename)
78 cachedir = os.path.dirname(cachefilename)
79
79
80 # No need to pass mode='wb' to fdopen(), since mkstemp() already
80 # No need to pass mode='wb' to fdopen(), since mkstemp() already
81 # opened the file in binary mode.
81 # opened the file in binary mode.
82 (tmpfd, tmpfilename) = tempfile.mkstemp(
82 (tmpfd, tmpfilename) = tempfile.mkstemp(
83 dir=cachedir, prefix=os.path.basename(filename))
83 dir=cachedir, prefix=os.path.basename(filename))
84 tmpfile = os.fdopen(tmpfd, 'w')
84 tmpfile = os.fdopen(tmpfd, 'w')
85
85
86 try:
86 try:
87 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
87 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
88 except StoreError, err:
88 except StoreError, err:
89 ui.warn(err.longmessage())
89 ui.warn(err.longmessage())
90 hhash = ""
90 hhash = ""
91
91
92 if hhash != hash:
92 if hhash != hash:
93 if hhash != "":
93 if hhash != "":
94 ui.warn(_('%s: data corruption (expected %s, got %s)\n')
94 ui.warn(_('%s: data corruption (expected %s, got %s)\n')
95 % (filename, hash, hhash))
95 % (filename, hash, hhash))
96 tmpfile.close() # no-op if it's already closed
96 tmpfile.close() # no-op if it's already closed
97 os.remove(tmpfilename)
97 os.remove(tmpfilename)
98 missing.append(filename)
98 missing.append(filename)
99 continue
99 continue
100
100
101 if os.path.exists(cachefilename): # Windows
101 if os.path.exists(cachefilename): # Windows
102 os.remove(cachefilename)
102 os.remove(cachefilename)
103 os.rename(tmpfilename, cachefilename)
103 os.rename(tmpfilename, cachefilename)
104 lfutil.linktosystemcache(self.repo, hash)
104 lfutil.linktosystemcache(self.repo, hash)
105 success.append((filename, hhash))
105 success.append((filename, hhash))
106
106
107 ui.progress(_('getting largefiles'), None)
107 ui.progress(_('getting largefiles'), None)
108 return (success, missing)
108 return (success, missing)
109
109
110 def verify(self, revs, contents=False):
110 def verify(self, revs, contents=False):
111 '''Verify the existence (and, optionally, contents) of every big
111 '''Verify the existence (and, optionally, contents) of every big
112 file revision referenced by every changeset in revs.
112 file revision referenced by every changeset in revs.
113 Return 0 if all is well, non-zero on any errors.'''
113 Return 0 if all is well, non-zero on any errors.'''
114 write = self.ui.write
114 write = self.ui.write
115 failed = False
115 failed = False
116
116
117 write(_('searching %d changesets for largefiles\n') % len(revs))
117 write(_('searching %d changesets for largefiles\n') % len(revs))
118 verified = set() # set of (filename, filenode) tuples
118 verified = set() # set of (filename, filenode) tuples
119
119
120 for rev in revs:
120 for rev in revs:
121 cctx = self.repo[rev]
121 cctx = self.repo[rev]
122 cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
122 cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
123
123
124 failed = lfutil.any_(self._verifyfile(
124 failed = lfutil.any_(self._verifyfile(
125 cctx, cset, contents, standin, verified) for standin in cctx)
125 cctx, cset, contents, standin, verified) for standin in cctx)
126
126
127 num_revs = len(verified)
127 num_revs = len(verified)
128 num_lfiles = len(set([fname for (fname, fnode) in verified]))
128 num_lfiles = len(set([fname for (fname, fnode) in verified]))
129 if contents:
129 if contents:
130 write(_('verified contents of %d revisions of %d largefiles\n')
130 write(_('verified contents of %d revisions of %d largefiles\n')
131 % (num_revs, num_lfiles))
131 % (num_revs, num_lfiles))
132 else:
132 else:
133 write(_('verified existence of %d revisions of %d largefiles\n')
133 write(_('verified existence of %d revisions of %d largefiles\n')
134 % (num_revs, num_lfiles))
134 % (num_revs, num_lfiles))
135
135
136 return int(failed)
136 return int(failed)
137
137
138 def _getfile(self, tmpfile, filename, hash):
138 def _getfile(self, tmpfile, filename, hash):
139 '''Fetch one revision of one file from the store and write it
139 '''Fetch one revision of one file from the store and write it
140 to tmpfile. Compute the hash of the file on-the-fly as it
140 to tmpfile. Compute the hash of the file on-the-fly as it
141 downloads and return the binary hash. Close tmpfile. Raise
141 downloads and return the binary hash. Close tmpfile. Raise
142 StoreError if unable to download the file (e.g. it does not
142 StoreError if unable to download the file (e.g. it does not
143 exist in the store).'''
143 exist in the store).'''
144 raise NotImplementedError('abstract method')
144 raise NotImplementedError('abstract method')
145
145
146 def _verifyfile(self, cctx, cset, contents, standin, verified):
146 def _verifyfile(self, cctx, cset, contents, standin, verified):
147 '''Perform the actual verification of a file in the store.
147 '''Perform the actual verification of a file in the store.
148 '''
148 '''
149 raise NotImplementedError('abstract method')
149 raise NotImplementedError('abstract method')
150
150
151 import localstore, wirestore
151 import localstore, wirestore
152
152
153 _storeprovider = {
153 _storeprovider = {
154 'file': [localstore.localstore],
154 'file': [localstore.localstore],
155 'http': [wirestore.wirestore],
155 'http': [wirestore.wirestore],
156 'https': [wirestore.wirestore],
156 'https': [wirestore.wirestore],
157 'ssh': [wirestore.wirestore],
157 'ssh': [wirestore.wirestore],
158 }
158 }
159
159
160 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
160 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
161
161
162 # During clone this function is passed the src's ui object
162 # During clone this function is passed the src's ui object
163 # but it needs the dest's ui object so it can read out of
163 # but it needs the dest's ui object so it can read out of
164 # the config file. Use repo.ui instead.
164 # the config file. Use repo.ui instead.
165 def _openstore(repo, remote=None, put=False):
165 def _openstore(repo, remote=None, put=False):
166 ui = repo.ui
166 ui = repo.ui
167
167
168 if not remote:
168 if not remote:
169 path = getattr(repo, 'lfpullsource', None) or \
169 path = getattr(repo, 'lfpullsource', None) or \
170 ui.expandpath('default-push', 'default')
170 ui.expandpath('default-push', 'default')
171 # If 'default-push' and 'default' can't be expanded
171
172 # they are just returned. In that case use the empty string which
172 # ui.expandpath() leaves 'default-push' and 'default' alone if
173 # use the filescheme.
173 # they cannot be expanded: fallback to the empty string,
174 # meaning the current directory.
174 if path == 'default-push' or path == 'default':
175 if path == 'default-push' or path == 'default':
175 path = ''
176 path = ''
176 remote = repo
177 remote = repo
177 else:
178 else:
178 remote = hg.peer(repo, {}, path)
179 remote = hg.peer(repo, {}, path)
179
180
180 # The path could be a scheme so use Mercurial's normal functionality
181 # The path could be a scheme so use Mercurial's normal functionality
181 # to resolve the scheme to a repository and use its path
182 # to resolve the scheme to a repository and use its path
182 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
183 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
183
184
184 match = _scheme_re.match(path)
185 match = _scheme_re.match(path)
185 if not match: # regular filesystem path
186 if not match: # regular filesystem path
186 scheme = 'file'
187 scheme = 'file'
187 else:
188 else:
188 scheme = match.group(1)
189 scheme = match.group(1)
189
190
190 try:
191 try:
191 storeproviders = _storeprovider[scheme]
192 storeproviders = _storeprovider[scheme]
192 except KeyError:
193 except KeyError:
193 raise util.Abort(_('unsupported URL scheme %r') % scheme)
194 raise util.Abort(_('unsupported URL scheme %r') % scheme)
194
195
195 for class_obj in storeproviders:
196 for class_obj in storeproviders:
196 try:
197 try:
197 return class_obj(ui, repo, remote)
198 return class_obj(ui, repo, remote)
198 except lfutil.storeprotonotcapable:
199 except lfutil.storeprotonotcapable:
199 pass
200 pass
200
201
201 raise util.Abort(_('%s does not appear to be a lfile store'), path)
202 raise util.Abort(_('%s does not appear to be a lfile store'), path)
@@ -1,484 +1,482 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command functions: lfadd() et. al, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
22 def lfconvert(ui, src, dest, *pats, **opts):
22 def lfconvert(ui, src, dest, *pats, **opts):
23 '''convert a normal repository to a largefiles repository
23 '''convert a normal repository to a largefiles repository
24
24
25 Convert repository SOURCE to a new repository DEST, identical to
25 Convert repository SOURCE to a new repository DEST, identical to
26 SOURCE except that certain files will be converted as largefiles:
26 SOURCE except that certain files will be converted as largefiles:
27 specifically, any file that matches any PATTERN *or* whose size is
27 specifically, any file that matches any PATTERN *or* whose size is
28 above the minimum size threshold is converted as a largefile. The
28 above the minimum size threshold is converted as a largefile. The
29 size used to determine whether or not to track a file as a
29 size used to determine whether or not to track a file as a
30 largefile is the size of the first version of the file. The
30 largefile is the size of the first version of the file. The
31 minimum size can be specified either with --size or in
31 minimum size can be specified either with --size or in
32 configuration as ``largefiles.size``.
32 configuration as ``largefiles.size``.
33
33
34 After running this command you will need to make sure that
34 After running this command you will need to make sure that
35 largefiles is enabled anywhere you intend to push the new
35 largefiles is enabled anywhere you intend to push the new
36 repository.
36 repository.
37
37
38 Use --tonormal to convert largefiles back to normal files; after
38 Use --tonormal to convert largefiles back to normal files; after
39 this, the DEST repository can be used without largefiles at all.'''
39 this, the DEST repository can be used without largefiles at all.'''
40
40
41 if opts['tonormal']:
41 if opts['tonormal']:
42 tolfile = False
42 tolfile = False
43 else:
43 else:
44 tolfile = True
44 tolfile = True
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
46 try:
46 try:
47 rsrc = hg.repository(ui, src)
47 rsrc = hg.repository(ui, src)
48 if not rsrc.local():
48 if not rsrc.local():
49 raise util.Abort(_('%s is not a local Mercurial repo') % src)
49 raise util.Abort(_('%s is not a local Mercurial repo') % src)
50 except error.RepoError, err:
50 except error.RepoError, err:
51 ui.traceback()
51 ui.traceback()
52 raise util.Abort(err.args[0])
52 raise util.Abort(err.args[0])
53 if os.path.exists(dest):
53 if os.path.exists(dest):
54 if not os.path.isdir(dest):
54 if not os.path.isdir(dest):
55 raise util.Abort(_('destination %s already exists') % dest)
55 raise util.Abort(_('destination %s already exists') % dest)
56 elif os.listdir(dest):
56 elif os.listdir(dest):
57 raise util.Abort(_('destination %s is not empty') % dest)
57 raise util.Abort(_('destination %s is not empty') % dest)
58 try:
58 try:
59 ui.status(_('initializing destination %s\n') % dest)
59 ui.status(_('initializing destination %s\n') % dest)
60 rdst = hg.repository(ui, dest, create=True)
60 rdst = hg.repository(ui, dest, create=True)
61 if not rdst.local():
61 if not rdst.local():
62 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
62 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
63 except error.RepoError:
63 except error.RepoError:
64 ui.traceback()
64 ui.traceback()
65 raise util.Abort(_('%s is not a repo') % dest)
65 raise util.Abort(_('%s is not a repo') % dest)
66
66
67 success = False
67 success = False
68 try:
68 try:
69 # Lock destination to prevent modification while it is converted to.
69 # Lock destination to prevent modification while it is converted to.
70 # Don't need to lock src because we are just reading from its history
70 # Don't need to lock src because we are just reading from its history
71 # which can't change.
71 # which can't change.
72 dst_lock = rdst.lock()
72 dst_lock = rdst.lock()
73
73
74 # Get a list of all changesets in the source. The easy way to do this
74 # Get a list of all changesets in the source. The easy way to do this
75 # is to simply walk the changelog, using changelog.nodesbetween().
75 # is to simply walk the changelog, using changelog.nodesbetween().
76 # Take a look at mercurial/revlog.py:639 for more details.
76 # Take a look at mercurial/revlog.py:639 for more details.
77 # Use a generator instead of a list to decrease memory usage
77 # Use a generator instead of a list to decrease memory usage
78 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
78 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
79 rsrc.heads())[0])
79 rsrc.heads())[0])
80 revmap = {node.nullid: node.nullid}
80 revmap = {node.nullid: node.nullid}
81 if tolfile:
81 if tolfile:
82 lfiles = set()
82 lfiles = set()
83 normalfiles = set()
83 normalfiles = set()
84 if not pats:
84 if not pats:
85 pats = ui.config(lfutil.longname, 'patterns', default=())
85 pats = ui.config(lfutil.longname, 'patterns', default=())
86 if pats:
86 if pats:
87 pats = pats.split(' ')
87 pats = pats.split(' ')
88 if pats:
88 if pats:
89 matcher = match_.match(rsrc.root, '', list(pats))
89 matcher = match_.match(rsrc.root, '', list(pats))
90 else:
90 else:
91 matcher = None
91 matcher = None
92
92
93 lfiletohash = {}
93 lfiletohash = {}
94 for ctx in ctxs:
94 for ctx in ctxs:
95 ui.progress(_('converting revisions'), ctx.rev(),
95 ui.progress(_('converting revisions'), ctx.rev(),
96 unit=_('revision'), total=rsrc['tip'].rev())
96 unit=_('revision'), total=rsrc['tip'].rev())
97 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
97 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
98 lfiles, normalfiles, matcher, size, lfiletohash)
98 lfiles, normalfiles, matcher, size, lfiletohash)
99 ui.progress(_('converting revisions'), None)
99 ui.progress(_('converting revisions'), None)
100
100
101 if os.path.exists(rdst.wjoin(lfutil.shortname)):
101 if os.path.exists(rdst.wjoin(lfutil.shortname)):
102 shutil.rmtree(rdst.wjoin(lfutil.shortname))
102 shutil.rmtree(rdst.wjoin(lfutil.shortname))
103
103
104 for f in lfiletohash.keys():
104 for f in lfiletohash.keys():
105 if os.path.isfile(rdst.wjoin(f)):
105 if os.path.isfile(rdst.wjoin(f)):
106 os.unlink(rdst.wjoin(f))
106 os.unlink(rdst.wjoin(f))
107 try:
107 try:
108 os.removedirs(os.path.dirname(rdst.wjoin(f)))
108 os.removedirs(os.path.dirname(rdst.wjoin(f)))
109 except OSError:
109 except OSError:
110 pass
110 pass
111
111
112 else:
112 else:
113 for ctx in ctxs:
113 for ctx in ctxs:
114 ui.progress(_('converting revisions'), ctx.rev(),
114 ui.progress(_('converting revisions'), ctx.rev(),
115 unit=_('revision'), total=rsrc['tip'].rev())
115 unit=_('revision'), total=rsrc['tip'].rev())
116 _addchangeset(ui, rsrc, rdst, ctx, revmap)
116 _addchangeset(ui, rsrc, rdst, ctx, revmap)
117
117
118 ui.progress(_('converting revisions'), None)
118 ui.progress(_('converting revisions'), None)
119 success = True
119 success = True
120 finally:
120 finally:
121 if not success:
121 if not success:
122 # we failed, remove the new directory
122 # we failed, remove the new directory
123 shutil.rmtree(rdst.root)
123 shutil.rmtree(rdst.root)
124 dst_lock.release()
124 dst_lock.release()
125
125
126 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
126 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
127 # Convert src parents to dst parents
127 # Convert src parents to dst parents
128 parents = []
128 parents = []
129 for p in ctx.parents():
129 for p in ctx.parents():
130 parents.append(revmap[p.node()])
130 parents.append(revmap[p.node()])
131 while len(parents) < 2:
131 while len(parents) < 2:
132 parents.append(node.nullid)
132 parents.append(node.nullid)
133
133
134 # Generate list of changed files
134 # Generate list of changed files
135 files = set(ctx.files())
135 files = set(ctx.files())
136 if node.nullid not in parents:
136 if node.nullid not in parents:
137 mc = ctx.manifest()
137 mc = ctx.manifest()
138 mp1 = ctx.parents()[0].manifest()
138 mp1 = ctx.parents()[0].manifest()
139 mp2 = ctx.parents()[1].manifest()
139 mp2 = ctx.parents()[1].manifest()
140 files |= (set(mp1) | set(mp2)) - set(mc)
140 files |= (set(mp1) | set(mp2)) - set(mc)
141 for f in mc:
141 for f in mc:
142 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
142 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
143 files.add(f)
143 files.add(f)
144
144
145 def getfilectx(repo, memctx, f):
145 def getfilectx(repo, memctx, f):
146 if lfutil.standin(f) in files:
146 if lfutil.standin(f) in files:
147 # if the file isn't in the manifest then it was removed
147 # if the file isn't in the manifest then it was removed
148 # or renamed, raise IOError to indicate this
148 # or renamed, raise IOError to indicate this
149 try:
149 try:
150 fctx = ctx.filectx(lfutil.standin(f))
150 fctx = ctx.filectx(lfutil.standin(f))
151 except error.LookupError:
151 except error.LookupError:
152 raise IOError()
152 raise IOError()
153 renamed = fctx.renamed()
153 renamed = fctx.renamed()
154 if renamed:
154 if renamed:
155 renamed = lfutil.splitstandin(renamed[0])
155 renamed = lfutil.splitstandin(renamed[0])
156
156
157 hash = fctx.data().strip()
157 hash = fctx.data().strip()
158 path = lfutil.findfile(rsrc, hash)
158 path = lfutil.findfile(rsrc, hash)
159 ### TODO: What if the file is not cached?
159 ### TODO: What if the file is not cached?
160 data = ''
160 data = ''
161 fd = None
161 fd = None
162 try:
162 try:
163 fd = open(path, 'rb')
163 fd = open(path, 'rb')
164 data = fd.read()
164 data = fd.read()
165 finally:
165 finally:
166 if fd:
166 if fd:
167 fd.close()
167 fd.close()
168 return context.memfilectx(f, data, 'l' in fctx.flags(),
168 return context.memfilectx(f, data, 'l' in fctx.flags(),
169 'x' in fctx.flags(), renamed)
169 'x' in fctx.flags(), renamed)
170 else:
170 else:
171 try:
171 try:
172 fctx = ctx.filectx(f)
172 fctx = ctx.filectx(f)
173 except error.LookupError:
173 except error.LookupError:
174 raise IOError()
174 raise IOError()
175 renamed = fctx.renamed()
175 renamed = fctx.renamed()
176 if renamed:
176 if renamed:
177 renamed = renamed[0]
177 renamed = renamed[0]
178 data = fctx.data()
178 data = fctx.data()
179 if f == '.hgtags':
179 if f == '.hgtags':
180 newdata = []
180 newdata = []
181 for line in data.splitlines():
181 for line in data.splitlines():
182 id, name = line.split(' ', 1)
182 id, name = line.split(' ', 1)
183 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
183 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
184 name))
184 name))
185 data = ''.join(newdata)
185 data = ''.join(newdata)
186 return context.memfilectx(f, data, 'l' in fctx.flags(),
186 return context.memfilectx(f, data, 'l' in fctx.flags(),
187 'x' in fctx.flags(), renamed)
187 'x' in fctx.flags(), renamed)
188
188
189 dstfiles = []
189 dstfiles = []
190 for file in files:
190 for file in files:
191 if lfutil.isstandin(file):
191 if lfutil.isstandin(file):
192 dstfiles.append(lfutil.splitstandin(file))
192 dstfiles.append(lfutil.splitstandin(file))
193 else:
193 else:
194 dstfiles.append(file)
194 dstfiles.append(file)
195 # Commit
195 # Commit
196 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
196 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
197 getfilectx, ctx.user(), ctx.date(), ctx.extra())
197 getfilectx, ctx.user(), ctx.date(), ctx.extra())
198 ret = rdst.commitctx(mctx)
198 ret = rdst.commitctx(mctx)
199 rdst.dirstate.setparents(ret)
199 rdst.dirstate.setparents(ret)
200 revmap[ctx.node()] = rdst.changelog.tip()
200 revmap[ctx.node()] = rdst.changelog.tip()
201
201
202 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
202 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
203 matcher, size, lfiletohash):
203 matcher, size, lfiletohash):
204 # Convert src parents to dst parents
204 # Convert src parents to dst parents
205 parents = []
205 parents = []
206 for p in ctx.parents():
206 for p in ctx.parents():
207 parents.append(revmap[p.node()])
207 parents.append(revmap[p.node()])
208 while len(parents) < 2:
208 while len(parents) < 2:
209 parents.append(node.nullid)
209 parents.append(node.nullid)
210
210
211 # Generate list of changed files
211 # Generate list of changed files
212 files = set(ctx.files())
212 files = set(ctx.files())
213 if node.nullid not in parents:
213 if node.nullid not in parents:
214 mc = ctx.manifest()
214 mc = ctx.manifest()
215 mp1 = ctx.parents()[0].manifest()
215 mp1 = ctx.parents()[0].manifest()
216 mp2 = ctx.parents()[1].manifest()
216 mp2 = ctx.parents()[1].manifest()
217 files |= (set(mp1) | set(mp2)) - set(mc)
217 files |= (set(mp1) | set(mp2)) - set(mc)
218 for f in mc:
218 for f in mc:
219 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
219 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
220 files.add(f)
220 files.add(f)
221
221
222 dstfiles = []
222 dstfiles = []
223 for f in files:
223 for f in files:
224 if f not in lfiles and f not in normalfiles:
224 if f not in lfiles and f not in normalfiles:
225 islfile = _islfile(f, ctx, matcher, size)
225 islfile = _islfile(f, ctx, matcher, size)
226 # If this file was renamed or copied then copy
226 # If this file was renamed or copied then copy
227 # the lfileness of its predecessor
227 # the lfileness of its predecessor
228 if f in ctx.manifest():
228 if f in ctx.manifest():
229 fctx = ctx.filectx(f)
229 fctx = ctx.filectx(f)
230 renamed = fctx.renamed()
230 renamed = fctx.renamed()
231 renamedlfile = renamed and renamed[0] in lfiles
231 renamedlfile = renamed and renamed[0] in lfiles
232 islfile |= renamedlfile
232 islfile |= renamedlfile
233 if 'l' in fctx.flags():
233 if 'l' in fctx.flags():
234 if renamedlfile:
234 if renamedlfile:
235 raise util.Abort(
235 raise util.Abort(
236 _('Renamed/copied largefile %s becomes symlink')
236 _('Renamed/copied largefile %s becomes symlink')
237 % f)
237 % f)
238 islfile = False
238 islfile = False
239 if islfile:
239 if islfile:
240 lfiles.add(f)
240 lfiles.add(f)
241 else:
241 else:
242 normalfiles.add(f)
242 normalfiles.add(f)
243
243
244 if f in lfiles:
244 if f in lfiles:
245 dstfiles.append(lfutil.standin(f))
245 dstfiles.append(lfutil.standin(f))
246 # lfile in manifest if it has not been removed/renamed
246 # lfile in manifest if it has not been removed/renamed
247 if f in ctx.manifest():
247 if f in ctx.manifest():
248 if 'l' in ctx.filectx(f).flags():
248 if 'l' in ctx.filectx(f).flags():
249 if renamed and renamed[0] in lfiles:
249 if renamed and renamed[0] in lfiles:
250 raise util.Abort(_('largefile %s becomes symlink') % f)
250 raise util.Abort(_('largefile %s becomes symlink') % f)
251
251
252 # lfile was modified, update standins
252 # lfile was modified, update standins
253 fullpath = rdst.wjoin(f)
253 fullpath = rdst.wjoin(f)
254 lfutil.createdir(os.path.dirname(fullpath))
254 lfutil.createdir(os.path.dirname(fullpath))
255 m = util.sha1('')
255 m = util.sha1('')
256 m.update(ctx[f].data())
256 m.update(ctx[f].data())
257 hash = m.hexdigest()
257 hash = m.hexdigest()
258 if f not in lfiletohash or lfiletohash[f] != hash:
258 if f not in lfiletohash or lfiletohash[f] != hash:
259 try:
259 try:
260 fd = open(fullpath, 'wb')
260 fd = open(fullpath, 'wb')
261 fd.write(ctx[f].data())
261 fd.write(ctx[f].data())
262 finally:
262 finally:
263 if fd:
263 if fd:
264 fd.close()
264 fd.close()
265 executable = 'x' in ctx[f].flags()
265 executable = 'x' in ctx[f].flags()
266 os.chmod(fullpath, lfutil.getmode(executable))
266 os.chmod(fullpath, lfutil.getmode(executable))
267 lfutil.writestandin(rdst, lfutil.standin(f), hash,
267 lfutil.writestandin(rdst, lfutil.standin(f), hash,
268 executable)
268 executable)
269 lfiletohash[f] = hash
269 lfiletohash[f] = hash
270 else:
270 else:
271 # normal file
271 # normal file
272 dstfiles.append(f)
272 dstfiles.append(f)
273
273
274 def getfilectx(repo, memctx, f):
274 def getfilectx(repo, memctx, f):
275 if lfutil.isstandin(f):
275 if lfutil.isstandin(f):
276 # if the file isn't in the manifest then it was removed
276 # if the file isn't in the manifest then it was removed
277 # or renamed, raise IOError to indicate this
277 # or renamed, raise IOError to indicate this
278 srcfname = lfutil.splitstandin(f)
278 srcfname = lfutil.splitstandin(f)
279 try:
279 try:
280 fctx = ctx.filectx(srcfname)
280 fctx = ctx.filectx(srcfname)
281 except error.LookupError:
281 except error.LookupError:
282 raise IOError()
282 raise IOError()
283 renamed = fctx.renamed()
283 renamed = fctx.renamed()
284 if renamed:
284 if renamed:
285 # standin is always a lfile because lfileness
285 # standin is always a lfile because lfileness
286 # doesn't change after rename or copy
286 # doesn't change after rename or copy
287 renamed = lfutil.standin(renamed[0])
287 renamed = lfutil.standin(renamed[0])
288
288
289 return context.memfilectx(f, lfiletohash[srcfname], 'l' in
289 return context.memfilectx(f, lfiletohash[srcfname], 'l' in
290 fctx.flags(), 'x' in fctx.flags(), renamed)
290 fctx.flags(), 'x' in fctx.flags(), renamed)
291 else:
291 else:
292 try:
292 try:
293 fctx = ctx.filectx(f)
293 fctx = ctx.filectx(f)
294 except error.LookupError:
294 except error.LookupError:
295 raise IOError()
295 raise IOError()
296 renamed = fctx.renamed()
296 renamed = fctx.renamed()
297 if renamed:
297 if renamed:
298 renamed = renamed[0]
298 renamed = renamed[0]
299
299
300 data = fctx.data()
300 data = fctx.data()
301 if f == '.hgtags':
301 if f == '.hgtags':
302 newdata = []
302 newdata = []
303 for line in data.splitlines():
303 for line in data.splitlines():
304 id, name = line.split(' ', 1)
304 id, name = line.split(' ', 1)
305 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
305 newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
306 name))
306 name))
307 data = ''.join(newdata)
307 data = ''.join(newdata)
308 return context.memfilectx(f, data, 'l' in fctx.flags(),
308 return context.memfilectx(f, data, 'l' in fctx.flags(),
309 'x' in fctx.flags(), renamed)
309 'x' in fctx.flags(), renamed)
310
310
311 # Commit
311 # Commit
312 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
312 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
313 getfilectx, ctx.user(), ctx.date(), ctx.extra())
313 getfilectx, ctx.user(), ctx.date(), ctx.extra())
314 ret = rdst.commitctx(mctx)
314 ret = rdst.commitctx(mctx)
315 rdst.dirstate.setparents(ret)
315 rdst.dirstate.setparents(ret)
316 revmap[ctx.node()] = rdst.changelog.tip()
316 revmap[ctx.node()] = rdst.changelog.tip()
317
317
318 def _islfile(file, ctx, matcher, size):
318 def _islfile(file, ctx, matcher, size):
319 '''
319 '''Return true if file should be considered a largefile, i.e.
320 A file is a lfile if it matches a pattern or is over
320 matcher matches it or it is larger than size.'''
321 the given size.
321 # never store special .hg* files as largefiles
322 '''
323 # Never store hgtags or hgignore as lfiles
324 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
322 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
325 return False
323 return False
326 if matcher and matcher(file):
324 if matcher and matcher(file):
327 return True
325 return True
328 try:
326 try:
329 return ctx.filectx(file).size() >= size * 1024 * 1024
327 return ctx.filectx(file).size() >= size * 1024 * 1024
330 except error.LookupError:
328 except error.LookupError:
331 return False
329 return False
332
330
333 def uploadlfiles(ui, rsrc, rdst, files):
331 def uploadlfiles(ui, rsrc, rdst, files):
334 '''upload largefiles to the central store'''
332 '''upload largefiles to the central store'''
335
333
336 # Don't upload locally. All largefiles are in the system wide cache
334 # Don't upload locally. All largefiles are in the system wide cache
337 # so the other repo can just get them from there.
335 # so the other repo can just get them from there.
338 if not files or rdst.local():
336 if not files or rdst.local():
339 return
337 return
340
338
341 store = basestore._openstore(rsrc, rdst, put=True)
339 store = basestore._openstore(rsrc, rdst, put=True)
342
340
343 at = 0
341 at = 0
344 files = filter(lambda h: not store.exists(h), files)
342 files = filter(lambda h: not store.exists(h), files)
345 for hash in files:
343 for hash in files:
346 ui.progress(_('uploading largefiles'), at, unit='largefile',
344 ui.progress(_('uploading largefiles'), at, unit='largefile',
347 total=len(files))
345 total=len(files))
348 source = lfutil.findfile(rsrc, hash)
346 source = lfutil.findfile(rsrc, hash)
349 if not source:
347 if not source:
350 raise util.Abort(_('Missing largefile %s needs to be uploaded')
348 raise util.Abort(_('Missing largefile %s needs to be uploaded')
351 % hash)
349 % hash)
352 # XXX check for errors here
350 # XXX check for errors here
353 store.put(source, hash)
351 store.put(source, hash)
354 at += 1
352 at += 1
355 ui.progress(_('uploading largefiles'), None)
353 ui.progress(_('uploading largefiles'), None)
356
354
357 def verifylfiles(ui, repo, all=False, contents=False):
355 def verifylfiles(ui, repo, all=False, contents=False):
358 '''Verify that every big file revision in the current changeset
356 '''Verify that every big file revision in the current changeset
359 exists in the central store. With --contents, also verify that
357 exists in the central store. With --contents, also verify that
360 the contents of each big file revision are correct (SHA-1 hash
358 the contents of each big file revision are correct (SHA-1 hash
361 matches the revision ID). With --all, check every changeset in
359 matches the revision ID). With --all, check every changeset in
362 this repository.'''
360 this repository.'''
363 if all:
361 if all:
364 # Pass a list to the function rather than an iterator because we know a
362 # Pass a list to the function rather than an iterator because we know a
365 # list will work.
363 # list will work.
366 revs = range(len(repo))
364 revs = range(len(repo))
367 else:
365 else:
368 revs = ['.']
366 revs = ['.']
369
367
370 store = basestore._openstore(repo)
368 store = basestore._openstore(repo)
371 return store.verify(revs, contents=contents)
369 return store.verify(revs, contents=contents)
372
370
373 def cachelfiles(ui, repo, node):
371 def cachelfiles(ui, repo, node):
374 '''cachelfiles ensures that all largefiles needed by the specified revision
372 '''cachelfiles ensures that all largefiles needed by the specified revision
375 are present in the repository's largefile cache.
373 are present in the repository's largefile cache.
376
374
377 returns a tuple (cached, missing). cached is the list of files downloaded
375 returns a tuple (cached, missing). cached is the list of files downloaded
378 by this operation; missing is the list of files that were needed but could
376 by this operation; missing is the list of files that were needed but could
379 not be found.'''
377 not be found.'''
380 lfiles = lfutil.listlfiles(repo, node)
378 lfiles = lfutil.listlfiles(repo, node)
381 toget = []
379 toget = []
382
380
383 for lfile in lfiles:
381 for lfile in lfiles:
384 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
382 expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
385 # if it exists and its hash matches, it might have been locally
383 # if it exists and its hash matches, it might have been locally
386 # modified before updating and the user chose 'local'. in this case,
384 # modified before updating and the user chose 'local'. in this case,
387 # it will not be in any store, so don't look for it.
385 # it will not be in any store, so don't look for it.
388 if (not os.path.exists(repo.wjoin(lfile)) \
386 if (not os.path.exists(repo.wjoin(lfile)) \
389 or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
387 or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
390 not lfutil.findfile(repo, expectedhash):
388 not lfutil.findfile(repo, expectedhash):
391 toget.append((lfile, expectedhash))
389 toget.append((lfile, expectedhash))
392
390
393 if toget:
391 if toget:
394 store = basestore._openstore(repo)
392 store = basestore._openstore(repo)
395 ret = store.get(toget)
393 ret = store.get(toget)
396 return ret
394 return ret
397
395
398 return ([], [])
396 return ([], [])
399
397
400 def updatelfiles(ui, repo, filelist=None, printmessage=True):
398 def updatelfiles(ui, repo, filelist=None, printmessage=True):
401 wlock = repo.wlock()
399 wlock = repo.wlock()
402 try:
400 try:
403 lfdirstate = lfutil.openlfdirstate(ui, repo)
401 lfdirstate = lfutil.openlfdirstate(ui, repo)
404 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
402 lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
405
403
406 if filelist is not None:
404 if filelist is not None:
407 lfiles = [f for f in lfiles if f in filelist]
405 lfiles = [f for f in lfiles if f in filelist]
408
406
409 printed = False
407 printed = False
410 if printmessage and lfiles:
408 if printmessage and lfiles:
411 ui.status(_('getting changed largefiles\n'))
409 ui.status(_('getting changed largefiles\n'))
412 printed = True
410 printed = True
413 cachelfiles(ui, repo, '.')
411 cachelfiles(ui, repo, '.')
414
412
415 updated, removed = 0, 0
413 updated, removed = 0, 0
416 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
414 for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
417 # increment the appropriate counter according to _updatelfile's
415 # increment the appropriate counter according to _updatelfile's
418 # return value
416 # return value
419 updated += i > 0 and i or 0
417 updated += i > 0 and i or 0
420 removed -= i < 0 and i or 0
418 removed -= i < 0 and i or 0
421 if printmessage and (removed or updated) and not printed:
419 if printmessage and (removed or updated) and not printed:
422 ui.status(_('getting changed largefiles\n'))
420 ui.status(_('getting changed largefiles\n'))
423 printed = True
421 printed = True
424
422
425 lfdirstate.write()
423 lfdirstate.write()
426 if printed and printmessage:
424 if printed and printmessage:
427 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
425 ui.status(_('%d largefiles updated, %d removed\n') % (updated,
428 removed))
426 removed))
429 finally:
427 finally:
430 wlock.release()
428 wlock.release()
431
429
432 def _updatelfile(repo, lfdirstate, lfile):
430 def _updatelfile(repo, lfdirstate, lfile):
433 '''updates a single largefile and copies the state of its standin from
431 '''updates a single largefile and copies the state of its standin from
434 the repository's dirstate to its state in the lfdirstate.
432 the repository's dirstate to its state in the lfdirstate.
435
433
436 returns 1 if the file was modified, -1 if the file was removed, 0 if the
434 returns 1 if the file was modified, -1 if the file was removed, 0 if the
437 file was unchanged, and None if the needed largefile was missing from the
435 file was unchanged, and None if the needed largefile was missing from the
438 cache.'''
436 cache.'''
439 ret = 0
437 ret = 0
440 abslfile = repo.wjoin(lfile)
438 abslfile = repo.wjoin(lfile)
441 absstandin = repo.wjoin(lfutil.standin(lfile))
439 absstandin = repo.wjoin(lfutil.standin(lfile))
442 if os.path.exists(absstandin):
440 if os.path.exists(absstandin):
443 if os.path.exists(absstandin+'.orig'):
441 if os.path.exists(absstandin+'.orig'):
444 shutil.copyfile(abslfile, abslfile+'.orig')
442 shutil.copyfile(abslfile, abslfile+'.orig')
445 expecthash = lfutil.readstandin(repo, lfile)
443 expecthash = lfutil.readstandin(repo, lfile)
446 if expecthash != '' and \
444 if expecthash != '' and \
447 (not os.path.exists(abslfile) or \
445 (not os.path.exists(abslfile) or \
448 expecthash != lfutil.hashfile(abslfile)):
446 expecthash != lfutil.hashfile(abslfile)):
449 if not lfutil.copyfromcache(repo, expecthash, lfile):
447 if not lfutil.copyfromcache(repo, expecthash, lfile):
450 return None # don't try to set the mode or update the dirstate
448 return None # don't try to set the mode or update the dirstate
451 ret = 1
449 ret = 1
452 mode = os.stat(absstandin).st_mode
450 mode = os.stat(absstandin).st_mode
453 if mode != os.stat(abslfile).st_mode:
451 if mode != os.stat(abslfile).st_mode:
454 os.chmod(abslfile, mode)
452 os.chmod(abslfile, mode)
455 ret = 1
453 ret = 1
456 else:
454 else:
457 if os.path.exists(abslfile):
455 if os.path.exists(abslfile):
458 os.unlink(abslfile)
456 os.unlink(abslfile)
459 ret = -1
457 ret = -1
460 state = repo.dirstate[lfutil.standin(lfile)]
458 state = repo.dirstate[lfutil.standin(lfile)]
461 if state == 'n':
459 if state == 'n':
462 lfdirstate.normal(lfile)
460 lfdirstate.normal(lfile)
463 elif state == 'r':
461 elif state == 'r':
464 lfdirstate.remove(lfile)
462 lfdirstate.remove(lfile)
465 elif state == 'a':
463 elif state == 'a':
466 lfdirstate.add(lfile)
464 lfdirstate.add(lfile)
467 elif state == '?':
465 elif state == '?':
468 lfdirstate.drop(lfile)
466 lfdirstate.drop(lfile)
469 return ret
467 return ret
470
468
471 # -- hg commands declarations ------------------------------------------------
469 # -- hg commands declarations ------------------------------------------------
472
470
473
471
474 cmdtable = {
472 cmdtable = {
475 'lfconvert': (lfconvert,
473 'lfconvert': (lfconvert,
476 [('s', 'size', '',
474 [('s', 'size', '',
477 _('minimum size (MB) for files to be converted '
475 _('minimum size (MB) for files to be converted '
478 'as largefiles'),
476 'as largefiles'),
479 'SIZE'),
477 'SIZE'),
480 ('', 'tonormal', False,
478 ('', 'tonormal', False,
481 _('convert from a largefiles repo to a normal repo')),
479 _('convert from a largefiles repo to a normal repo')),
482 ],
480 ],
483 _('hg lfconvert SOURCE DEST [FILE ...]')),
481 _('hg lfconvert SOURCE DEST [FILE ...]')),
484 }
482 }
@@ -1,445 +1,446 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import shutil
13 import shutil
14 import stat
14 import stat
15 import hashlib
15 import hashlib
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27 return dirstate.walk(matcher, [], unknown, ignored)
27 return dirstate.walk(matcher, [], unknown, ignored)
28
28
29 def repo_add(repo, list):
29 def repo_add(repo, list):
30 add = repo[None].add
30 add = repo[None].add
31 return add(list)
31 return add(list)
32
32
33 def repo_remove(repo, list, unlink=False):
33 def repo_remove(repo, list, unlink=False):
34 def remove(list, unlink):
34 def remove(list, unlink):
35 wlock = repo.wlock()
35 wlock = repo.wlock()
36 try:
36 try:
37 if unlink:
37 if unlink:
38 for f in list:
38 for f in list:
39 try:
39 try:
40 util.unlinkpath(repo.wjoin(f))
40 util.unlinkpath(repo.wjoin(f))
41 except OSError, inst:
41 except OSError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 repo[None].forget(list)
44 repo[None].forget(list)
45 finally:
45 finally:
46 wlock.release()
46 wlock.release()
47 return remove(list, unlink=unlink)
47 return remove(list, unlink=unlink)
48
48
49 def repo_forget(repo, list):
49 def repo_forget(repo, list):
50 forget = repo[None].forget
50 forget = repo[None].forget
51 return forget(list)
51 return forget(list)
52
52
53 def findoutgoing(repo, remote, force):
53 def findoutgoing(repo, remote, force):
54 from mercurial import discovery
54 from mercurial import discovery
55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
56 remote, force=force)
56 remote, force=force)
57 return repo.changelog.findmissing(common)
57 return repo.changelog.findmissing(common)
58
58
59 # -- Private worker functions ------------------------------------------
59 # -- Private worker functions ------------------------------------------
60
60
61 def getminsize(ui, assumelfiles, opt, default=10):
61 def getminsize(ui, assumelfiles, opt, default=10):
62 lfsize = opt
62 lfsize = opt
63 if not lfsize and assumelfiles:
63 if not lfsize and assumelfiles:
64 lfsize = ui.config(longname, 'size', default=default)
64 lfsize = ui.config(longname, 'size', default=default)
65 if lfsize:
65 if lfsize:
66 try:
66 try:
67 lfsize = float(lfsize)
67 lfsize = float(lfsize)
68 except ValueError:
68 except ValueError:
69 raise util.Abort(_('largefiles: size must be number (not %s)\n')
69 raise util.Abort(_('largefiles: size must be number (not %s)\n')
70 % lfsize)
70 % lfsize)
71 if lfsize is None:
71 if lfsize is None:
72 raise util.Abort(_('minimum size for largefiles must be specified'))
72 raise util.Abort(_('minimum size for largefiles must be specified'))
73 return lfsize
73 return lfsize
74
74
75 def link(src, dest):
75 def link(src, dest):
76 try:
76 try:
77 util.oslink(src, dest)
77 util.oslink(src, dest)
78 except OSError:
78 except OSError:
79 # If hardlinks fail fall back on copy
79 # if hardlinks fail, fallback on copy
80 shutil.copyfile(src, dest)
80 shutil.copyfile(src, dest)
81 os.chmod(dest, os.stat(src).st_mode)
81 os.chmod(dest, os.stat(src).st_mode)
82
82
83 def systemcachepath(ui, hash):
83 def systemcachepath(ui, hash):
84 path = ui.config(longname, 'systemcache', None)
84 path = ui.config(longname, 'systemcache', None)
85 if path:
85 if path:
86 path = os.path.join(path, hash)
86 path = os.path.join(path, hash)
87 else:
87 else:
88 if os.name == 'nt':
88 if os.name == 'nt':
89 path = os.path.join(os.getenv('LOCALAPPDATA') or \
89 path = os.path.join(os.getenv('LOCALAPPDATA') or \
90 os.getenv('APPDATA'), longname, hash)
90 os.getenv('APPDATA'), longname, hash)
91 elif os.name == 'posix':
91 elif os.name == 'posix':
92 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
92 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
93 else:
93 else:
94 raise util.Abort(_('Unknown operating system: %s\n') % os.name)
94 raise util.Abort(_('Unknown operating system: %s\n') % os.name)
95 return path
95 return path
96
96
97 def insystemcache(ui, hash):
97 def insystemcache(ui, hash):
98 return os.path.exists(systemcachepath(ui, hash))
98 return os.path.exists(systemcachepath(ui, hash))
99
99
100 def findfile(repo, hash):
100 def findfile(repo, hash):
101 if incache(repo, hash):
101 if incache(repo, hash):
102 repo.ui.note(_('Found %s in cache\n') % hash)
102 repo.ui.note(_('Found %s in cache\n') % hash)
103 return cachepath(repo, hash)
103 return cachepath(repo, hash)
104 if insystemcache(repo.ui, hash):
104 if insystemcache(repo.ui, hash):
105 repo.ui.note(_('Found %s in system cache\n') % hash)
105 repo.ui.note(_('Found %s in system cache\n') % hash)
106 return systemcachepath(repo.ui, hash)
106 return systemcachepath(repo.ui, hash)
107 return None
107 return None
108
108
109 class largefiles_dirstate(dirstate.dirstate):
109 class largefiles_dirstate(dirstate.dirstate):
110 def __getitem__(self, key):
110 def __getitem__(self, key):
111 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
111 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
112 def normal(self, f):
112 def normal(self, f):
113 return super(largefiles_dirstate, self).normal(unixpath(f))
113 return super(largefiles_dirstate, self).normal(unixpath(f))
114 def remove(self, f):
114 def remove(self, f):
115 return super(largefiles_dirstate, self).remove(unixpath(f))
115 return super(largefiles_dirstate, self).remove(unixpath(f))
116 def add(self, f):
116 def add(self, f):
117 return super(largefiles_dirstate, self).add(unixpath(f))
117 return super(largefiles_dirstate, self).add(unixpath(f))
118 def drop(self, f):
118 def drop(self, f):
119 return super(largefiles_dirstate, self).drop(unixpath(f))
119 return super(largefiles_dirstate, self).drop(unixpath(f))
120 def forget(self, f):
120 def forget(self, f):
121 return super(largefiles_dirstate, self).forget(unixpath(f))
121 return super(largefiles_dirstate, self).forget(unixpath(f))
122
122
123 def openlfdirstate(ui, repo):
123 def openlfdirstate(ui, repo):
124 '''
124 '''
125 Return a dirstate object that tracks big files: i.e. its root is the
125 Return a dirstate object that tracks largefiles: i.e. its root is
126 repo root, but it is saved in .hg/largefiles/dirstate.
126 the repo root, but it is saved in .hg/largefiles/dirstate.
127 '''
127 '''
128 admin = repo.join(longname)
128 admin = repo.join(longname)
129 opener = scmutil.opener(admin)
129 opener = scmutil.opener(admin)
130 if util.safehasattr(repo.dirstate, '_validate'):
130 if util.safehasattr(repo.dirstate, '_validate'):
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
132 repo.dirstate._validate)
132 repo.dirstate._validate)
133 else:
133 else:
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
135
135
136 # If the largefiles dirstate does not exist, populate and create it. This
136 # If the largefiles dirstate does not exist, populate and create
137 # ensures that we create it on the first meaningful largefiles operation in
137 # it. This ensures that we create it on the first meaningful
138 # a new clone. It also gives us an easy way to forcibly rebuild largefiles
138 # largefiles operation in a new clone. It also gives us an easy
139 # state:
139 # way to forcibly rebuild largefiles state:
140 # rm .hg/largefiles/dirstate && hg status
140 # rm .hg/largefiles/dirstate && hg status
141 # Or even, if things are really messed up:
141 # Or even, if things are really messed up:
142 # rm -rf .hg/largefiles && hg status
142 # rm -rf .hg/largefiles && hg status
143 if not os.path.exists(os.path.join(admin, 'dirstate')):
143 if not os.path.exists(os.path.join(admin, 'dirstate')):
144 util.makedirs(admin)
144 util.makedirs(admin)
145 matcher = getstandinmatcher(repo)
145 matcher = getstandinmatcher(repo)
146 for standin in dirstate_walk(repo.dirstate, matcher):
146 for standin in dirstate_walk(repo.dirstate, matcher):
147 lfile = splitstandin(standin)
147 lfile = splitstandin(standin)
148 hash = readstandin(repo, lfile)
148 hash = readstandin(repo, lfile)
149 lfdirstate.normallookup(lfile)
149 lfdirstate.normallookup(lfile)
150 try:
150 try:
151 if hash == hashfile(lfile):
151 if hash == hashfile(lfile):
152 lfdirstate.normal(lfile)
152 lfdirstate.normal(lfile)
153 except IOError, err:
153 except IOError, err:
154 if err.errno != errno.ENOENT:
154 if err.errno != errno.ENOENT:
155 raise
155 raise
156
156
157 lfdirstate.write()
157 lfdirstate.write()
158
158
159 return lfdirstate
159 return lfdirstate
160
160
161 def lfdirstate_status(lfdirstate, repo, rev):
161 def lfdirstate_status(lfdirstate, repo, rev):
162 wlock = repo.wlock()
162 wlock = repo.wlock()
163 try:
163 try:
164 match = match_.always(repo.root, repo.getcwd())
164 match = match_.always(repo.root, repo.getcwd())
165 s = lfdirstate.status(match, [], False, False, False)
165 s = lfdirstate.status(match, [], False, False, False)
166 unsure, modified, added, removed, missing, unknown, ignored, clean = s
166 unsure, modified, added, removed, missing, unknown, ignored, clean = s
167 for lfile in unsure:
167 for lfile in unsure:
168 if repo[rev][standin(lfile)].data().strip() != \
168 if repo[rev][standin(lfile)].data().strip() != \
169 hashfile(repo.wjoin(lfile)):
169 hashfile(repo.wjoin(lfile)):
170 modified.append(lfile)
170 modified.append(lfile)
171 else:
171 else:
172 clean.append(lfile)
172 clean.append(lfile)
173 lfdirstate.normal(lfile)
173 lfdirstate.normal(lfile)
174 lfdirstate.write()
174 lfdirstate.write()
175 finally:
175 finally:
176 wlock.release()
176 wlock.release()
177 return (modified, added, removed, missing, unknown, ignored, clean)
177 return (modified, added, removed, missing, unknown, ignored, clean)
178
178
179 def listlfiles(repo, rev=None, matcher=None):
179 def listlfiles(repo, rev=None, matcher=None):
180 '''list largefiles in the working copy or specified changeset'''
180 '''return a list of largefiles in the working copy or the
181 specified changeset'''
181
182
182 if matcher is None:
183 if matcher is None:
183 matcher = getstandinmatcher(repo)
184 matcher = getstandinmatcher(repo)
184
185
185 # ignore unknown files in working directory
186 # ignore unknown files in working directory
186 return [splitstandin(f) for f in repo[rev].walk(matcher) \
187 return [splitstandin(f) for f in repo[rev].walk(matcher) \
187 if rev is not None or repo.dirstate[f] != '?']
188 if rev is not None or repo.dirstate[f] != '?']
188
189
189 def incache(repo, hash):
190 def incache(repo, hash):
190 return os.path.exists(cachepath(repo, hash))
191 return os.path.exists(cachepath(repo, hash))
191
192
192 def createdir(dir):
193 def createdir(dir):
193 if not os.path.exists(dir):
194 if not os.path.exists(dir):
194 os.makedirs(dir)
195 os.makedirs(dir)
195
196
196 def cachepath(repo, hash):
197 def cachepath(repo, hash):
197 return repo.join(os.path.join(longname, hash))
198 return repo.join(os.path.join(longname, hash))
198
199
199 def copyfromcache(repo, hash, filename):
200 def copyfromcache(repo, hash, filename):
200 '''copyfromcache copies the specified largefile from the repo or system
201 '''Copy the specified largefile from the repo or system cache to
201 cache to the specified location in the repository. It will not throw an
202 filename in the repository. Return true on success or false if the
202 exception on failure, as it is meant to be called only after ensuring that
203 file was not found in either cache (which should not happened:
203 the needed largefile exists in the cache.'''
204 this is meant to be called only after ensuring that the needed
205 largefile exists in the cache).'''
204 path = findfile(repo, hash)
206 path = findfile(repo, hash)
205 if path is None:
207 if path is None:
206 return False
208 return False
207 util.makedirs(os.path.dirname(repo.wjoin(filename)))
209 util.makedirs(os.path.dirname(repo.wjoin(filename)))
208 shutil.copy(path, repo.wjoin(filename))
210 shutil.copy(path, repo.wjoin(filename))
209 return True
211 return True
210
212
211 def copytocache(repo, rev, file, uploaded=False):
213 def copytocache(repo, rev, file, uploaded=False):
212 hash = readstandin(repo, file)
214 hash = readstandin(repo, file)
213 if incache(repo, hash):
215 if incache(repo, hash):
214 return
216 return
215 copytocacheabsolute(repo, repo.wjoin(file), hash)
217 copytocacheabsolute(repo, repo.wjoin(file), hash)
216
218
217 def copytocacheabsolute(repo, file, hash):
219 def copytocacheabsolute(repo, file, hash):
218 createdir(os.path.dirname(cachepath(repo, hash)))
220 createdir(os.path.dirname(cachepath(repo, hash)))
219 if insystemcache(repo.ui, hash):
221 if insystemcache(repo.ui, hash):
220 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
222 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
221 else:
223 else:
222 shutil.copyfile(file, cachepath(repo, hash))
224 shutil.copyfile(file, cachepath(repo, hash))
223 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
225 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
224 linktosystemcache(repo, hash)
226 linktosystemcache(repo, hash)
225
227
226 def linktosystemcache(repo, hash):
228 def linktosystemcache(repo, hash):
227 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
229 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
228 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
230 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
229
231
230 def getstandinmatcher(repo, pats=[], opts={}):
232 def getstandinmatcher(repo, pats=[], opts={}):
231 '''Return a match object that applies pats to the standin directory'''
233 '''Return a match object that applies pats to the standin directory'''
232 standindir = repo.pathto(shortname)
234 standindir = repo.pathto(shortname)
233 if pats:
235 if pats:
234 # patterns supplied: search standin directory relative to current dir
236 # patterns supplied: search standin directory relative to current dir
235 cwd = repo.getcwd()
237 cwd = repo.getcwd()
236 if os.path.isabs(cwd):
238 if os.path.isabs(cwd):
237 # cwd is an absolute path for hg -R <reponame>
239 # cwd is an absolute path for hg -R <reponame>
238 # work relative to the repository root in this case
240 # work relative to the repository root in this case
239 cwd = ''
241 cwd = ''
240 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
242 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
241 elif os.path.isdir(standindir):
243 elif os.path.isdir(standindir):
242 # no patterns: relative to repo root
244 # no patterns: relative to repo root
243 pats = [standindir]
245 pats = [standindir]
244 else:
246 else:
245 # no patterns and no standin dir: return matcher that matches nothing
247 # no patterns and no standin dir: return matcher that matches nothing
246 match = match_.match(repo.root, None, [], exact=True)
248 match = match_.match(repo.root, None, [], exact=True)
247 match.matchfn = lambda f: False
249 match.matchfn = lambda f: False
248 return match
250 return match
249 return getmatcher(repo, pats, opts, showbad=False)
251 return getmatcher(repo, pats, opts, showbad=False)
250
252
251 def getmatcher(repo, pats=[], opts={}, showbad=True):
253 def getmatcher(repo, pats=[], opts={}, showbad=True):
252 '''Wrapper around scmutil.match() that adds showbad: if false, neuter
254 '''Wrapper around scmutil.match() that adds showbad: if false,
253 the match object\'s bad() method so it does not print any warnings
255 neuter the match object's bad() method so it does not print any
254 about missing files or directories.'''
256 warnings about missing files or directories.'''
255 match = scmutil.match(repo[None], pats, opts)
257 match = scmutil.match(repo[None], pats, opts)
256
258
257 if not showbad:
259 if not showbad:
258 match.bad = lambda f, msg: None
260 match.bad = lambda f, msg: None
259 return match
261 return match
260
262
261 def composestandinmatcher(repo, rmatcher):
263 def composestandinmatcher(repo, rmatcher):
262 '''Return a matcher that accepts standins corresponding to the files
264 '''Return a matcher that accepts standins corresponding to the
263 accepted by rmatcher. Pass the list of files in the matcher as the
265 files accepted by rmatcher. Pass the list of files in the matcher
264 paths specified by the user.'''
266 as the paths specified by the user.'''
265 smatcher = getstandinmatcher(repo, rmatcher.files())
267 smatcher = getstandinmatcher(repo, rmatcher.files())
266 isstandin = smatcher.matchfn
268 isstandin = smatcher.matchfn
267 def composed_matchfn(f):
269 def composed_matchfn(f):
268 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
270 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
269 smatcher.matchfn = composed_matchfn
271 smatcher.matchfn = composed_matchfn
270
272
271 return smatcher
273 return smatcher
272
274
273 def standin(filename):
275 def standin(filename):
274 '''Return the repo-relative path to the standin for the specified big
276 '''Return the repo-relative path to the standin for the specified big
275 file.'''
277 file.'''
276 # Notes:
278 # Notes:
277 # 1) Most callers want an absolute path, but _create_standin() needs
279 # 1) Most callers want an absolute path, but _create_standin() needs
278 # it repo-relative so lfadd() can pass it to repo_add(). So leave
280 # it repo-relative so lfadd() can pass it to repo_add(). So leave
279 # it up to the caller to use repo.wjoin() to get an absolute path.
281 # it up to the caller to use repo.wjoin() to get an absolute path.
280 # 2) Join with '/' because that's what dirstate always uses, even on
282 # 2) Join with '/' because that's what dirstate always uses, even on
281 # Windows. Change existing separator to '/' first in case we are
283 # Windows. Change existing separator to '/' first in case we are
282 # passed filenames from an external source (like the command line).
284 # passed filenames from an external source (like the command line).
283 return shortname + '/' + filename.replace(os.sep, '/')
285 return shortname + '/' + filename.replace(os.sep, '/')
284
286
285 def isstandin(filename):
287 def isstandin(filename):
286 '''Return true if filename is a big file standin. filename must
288 '''Return true if filename is a big file standin. filename must be
287 be in Mercurial\'s internal form (slash-separated).'''
289 in Mercurial's internal form (slash-separated).'''
288 return filename.startswith(shortname + '/')
290 return filename.startswith(shortname + '/')
289
291
290 def splitstandin(filename):
292 def splitstandin(filename):
291 # Split on / because that's what dirstate always uses, even on Windows.
293 # Split on / because that's what dirstate always uses, even on Windows.
292 # Change local separator to / first just in case we are passed filenames
294 # Change local separator to / first just in case we are passed filenames
293 # from an external source (like the command line).
295 # from an external source (like the command line).
294 bits = filename.replace(os.sep, '/').split('/', 1)
296 bits = filename.replace(os.sep, '/').split('/', 1)
295 if len(bits) == 2 and bits[0] == shortname:
297 if len(bits) == 2 and bits[0] == shortname:
296 return bits[1]
298 return bits[1]
297 else:
299 else:
298 return None
300 return None
299
301
300 def updatestandin(repo, standin):
302 def updatestandin(repo, standin):
301 file = repo.wjoin(splitstandin(standin))
303 file = repo.wjoin(splitstandin(standin))
302 if os.path.exists(file):
304 if os.path.exists(file):
303 hash = hashfile(file)
305 hash = hashfile(file)
304 executable = getexecutable(file)
306 executable = getexecutable(file)
305 writestandin(repo, standin, hash, executable)
307 writestandin(repo, standin, hash, executable)
306
308
307 def readstandin(repo, filename, node=None):
309 def readstandin(repo, filename, node=None):
308 '''read hex hash from standin for filename at given node, or working
310 '''read hex hash from standin for filename at given node, or working
309 directory if no node is given'''
311 directory if no node is given'''
310 return repo[node][standin(filename)].data().strip()
312 return repo[node][standin(filename)].data().strip()
311
313
312 def writestandin(repo, standin, hash, executable):
314 def writestandin(repo, standin, hash, executable):
313 '''write hhash to <repo.root>/<standin>'''
315 '''write hash to <repo.root>/<standin>'''
314 writehash(hash, repo.wjoin(standin), executable)
316 writehash(hash, repo.wjoin(standin), executable)
315
317
316 def copyandhash(instream, outfile):
318 def copyandhash(instream, outfile):
317 '''Read bytes from instream (iterable) and write them to outfile,
319 '''Read bytes from instream (iterable) and write them to outfile,
318 computing the SHA-1 hash of the data along the way. Close outfile
320 computing the SHA-1 hash of the data along the way. Close outfile
319 when done and return the binary hash.'''
321 when done and return the binary hash.'''
320 hasher = util.sha1('')
322 hasher = util.sha1('')
321 for data in instream:
323 for data in instream:
322 hasher.update(data)
324 hasher.update(data)
323 outfile.write(data)
325 outfile.write(data)
324
326
325 # Blecch: closing a file that somebody else opened is rude and
327 # Blecch: closing a file that somebody else opened is rude and
326 # wrong. But it's so darn convenient and practical! After all,
328 # wrong. But it's so darn convenient and practical! After all,
327 # outfile was opened just to copy and hash.
329 # outfile was opened just to copy and hash.
328 outfile.close()
330 outfile.close()
329
331
330 return hasher.digest()
332 return hasher.digest()
331
333
332 def hashrepofile(repo, file):
334 def hashrepofile(repo, file):
333 return hashfile(repo.wjoin(file))
335 return hashfile(repo.wjoin(file))
334
336
335 def hashfile(file):
337 def hashfile(file):
336 if not os.path.exists(file):
338 if not os.path.exists(file):
337 return ''
339 return ''
338 hasher = util.sha1('')
340 hasher = util.sha1('')
339 fd = open(file, 'rb')
341 fd = open(file, 'rb')
340 for data in blockstream(fd):
342 for data in blockstream(fd):
341 hasher.update(data)
343 hasher.update(data)
342 fd.close()
344 fd.close()
343 return hasher.hexdigest()
345 return hasher.hexdigest()
344
346
345 class limitreader(object):
347 class limitreader(object):
346 def __init__(self, f, limit):
348 def __init__(self, f, limit):
347 self.f = f
349 self.f = f
348 self.limit = limit
350 self.limit = limit
349
351
350 def read(self, length):
352 def read(self, length):
351 if self.limit == 0:
353 if self.limit == 0:
352 return ''
354 return ''
353 length = length > self.limit and self.limit or length
355 length = length > self.limit and self.limit or length
354 self.limit -= length
356 self.limit -= length
355 return self.f.read(length)
357 return self.f.read(length)
356
358
357 def close(self):
359 def close(self):
358 pass
360 pass
359
361
360 def blockstream(infile, blocksize=128 * 1024):
362 def blockstream(infile, blocksize=128 * 1024):
361 """Generator that yields blocks of data from infile and closes infile."""
363 """Generator that yields blocks of data from infile and closes infile."""
362 while True:
364 while True:
363 data = infile.read(blocksize)
365 data = infile.read(blocksize)
364 if not data:
366 if not data:
365 break
367 break
366 yield data
368 yield data
367 # Same blecch as above.
369 # same blecch as copyandhash() above
368 infile.close()
370 infile.close()
369
371
370 def readhash(filename):
372 def readhash(filename):
371 rfile = open(filename, 'rb')
373 rfile = open(filename, 'rb')
372 hash = rfile.read(40)
374 hash = rfile.read(40)
373 rfile.close()
375 rfile.close()
374 if len(hash) < 40:
376 if len(hash) < 40:
375 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
377 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
376 % (filename, len(hash)))
378 % (filename, len(hash)))
377 return hash
379 return hash
378
380
379 def writehash(hash, filename, executable):
381 def writehash(hash, filename, executable):
380 util.makedirs(os.path.dirname(filename))
382 util.makedirs(os.path.dirname(filename))
381 if os.path.exists(filename):
383 if os.path.exists(filename):
382 os.unlink(filename)
384 os.unlink(filename)
383 wfile = open(filename, 'wb')
385 wfile = open(filename, 'wb')
384
386
385 try:
387 try:
386 wfile.write(hash)
388 wfile.write(hash)
387 wfile.write('\n')
389 wfile.write('\n')
388 finally:
390 finally:
389 wfile.close()
391 wfile.close()
390 if os.path.exists(filename):
392 if os.path.exists(filename):
391 os.chmod(filename, getmode(executable))
393 os.chmod(filename, getmode(executable))
392
394
393 def getexecutable(filename):
395 def getexecutable(filename):
394 mode = os.stat(filename).st_mode
396 mode = os.stat(filename).st_mode
395 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
397 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
396 stat.S_IXOTH)
398 stat.S_IXOTH)
397
399
398 def getmode(executable):
400 def getmode(executable):
399 if executable:
401 if executable:
400 return 0755
402 return 0755
401 else:
403 else:
402 return 0644
404 return 0644
403
405
404 def urljoin(first, second, *arg):
406 def urljoin(first, second, *arg):
405 def join(left, right):
407 def join(left, right):
406 if not left.endswith('/'):
408 if not left.endswith('/'):
407 left += '/'
409 left += '/'
408 if right.startswith('/'):
410 if right.startswith('/'):
409 right = right[1:]
411 right = right[1:]
410 return left + right
412 return left + right
411
413
412 url = join(first, second)
414 url = join(first, second)
413 for a in arg:
415 for a in arg:
414 url = join(url, a)
416 url = join(url, a)
415 return url
417 return url
416
418
417 def hexsha1(data):
419 def hexsha1(data):
418 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
420 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
419 object data"""
421 object data"""
420 h = hashlib.sha1()
422 h = hashlib.sha1()
421 for chunk in util.filechunkiter(data):
423 for chunk in util.filechunkiter(data):
422 h.update(chunk)
424 h.update(chunk)
423 return h.hexdigest()
425 return h.hexdigest()
424
426
425 def httpsendfile(ui, filename):
427 def httpsendfile(ui, filename):
426 return httpconnection.httpsendfile(ui, filename, 'rb')
428 return httpconnection.httpsendfile(ui, filename, 'rb')
427
429
428 # Convert a path to a unix style path. This is used to give a
429 # canonical path to the lfdirstate.
430 def unixpath(path):
430 def unixpath(path):
431 '''Return a version of path normalized for use with the lfdirstate.'''
431 return os.path.normpath(path).replace(os.sep, '/')
432 return os.path.normpath(path).replace(os.sep, '/')
432
433
433 def islfilesrepo(repo):
434 def islfilesrepo(repo):
434 return ('largefiles' in repo.requirements and
435 return ('largefiles' in repo.requirements and
435 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
436 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
436
437
437 def any_(gen):
438 def any_(gen):
438 for x in gen:
439 for x in gen:
439 if x:
440 if x:
440 return True
441 return True
441 return False
442 return False
442
443
443 class storeprotonotcapable(BaseException):
444 class storeprotonotcapable(BaseException):
444 def __init__(self, storetypes):
445 def __init__(self, storetypes):
445 self.storetypes = storetypes
446 self.storetypes = storetypes
@@ -1,71 +1,71 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Store class for local filesystem.'''
9 '''store class for local filesystem'''
10
10
11 import os
11 import os
12
12
13 from mercurial import util
13 from mercurial import util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15
15
16 import lfutil
16 import lfutil
17 import basestore
17 import basestore
18
18
19 class localstore(basestore.basestore):
19 class localstore(basestore.basestore):
20 '''Because there is a system wide cache, the local store always uses that
20 '''Because there is a system-wide cache, the local store always
21 cache. Since the cache is updated elsewhere, we can just read from it here
21 uses that cache. Since the cache is updated elsewhere, we can
22 as if it were the store.'''
22 just read from it here as if it were the store.'''
23
23
24 def __init__(self, ui, repo, remote):
24 def __init__(self, ui, repo, remote):
25 url = os.path.join(remote.path, '.hg', lfutil.longname)
25 url = os.path.join(remote.path, '.hg', lfutil.longname)
26 super(localstore, self).__init__(ui, repo, util.expandpath(url))
26 super(localstore, self).__init__(ui, repo, util.expandpath(url))
27
27
28 def put(self, source, filename, hash):
28 def put(self, source, filename, hash):
29 '''Any file that is put must already be in the system wide cache so do
29 '''Any file that is put must already be in the system-wide
30 nothing.'''
30 cache so do nothing.'''
31 return
31 return
32
32
33 def exists(self, hash):
33 def exists(self, hash):
34 return lfutil.insystemcache(self.repo.ui, hash)
34 return lfutil.insystemcache(self.repo.ui, hash)
35
35
36 def _getfile(self, tmpfile, filename, hash):
36 def _getfile(self, tmpfile, filename, hash):
37 if lfutil.insystemcache(self.ui, hash):
37 if lfutil.insystemcache(self.ui, hash):
38 return lfutil.systemcachepath(self.ui, hash)
38 return lfutil.systemcachepath(self.ui, hash)
39 raise basestore.StoreError(filename, hash, '',
39 raise basestore.StoreError(filename, hash, '',
40 _("Can't get file locally"))
40 _("Can't get file locally"))
41
41
42 def _verifyfile(self, cctx, cset, contents, standin, verified):
42 def _verifyfile(self, cctx, cset, contents, standin, verified):
43 filename = lfutil.splitstandin(standin)
43 filename = lfutil.splitstandin(standin)
44 if not filename:
44 if not filename:
45 return False
45 return False
46 fctx = cctx[standin]
46 fctx = cctx[standin]
47 key = (filename, fctx.filenode())
47 key = (filename, fctx.filenode())
48 if key in verified:
48 if key in verified:
49 return False
49 return False
50
50
51 expecthash = fctx.data()[0:40]
51 expecthash = fctx.data()[0:40]
52 verified.add(key)
52 verified.add(key)
53 if not lfutil.insystemcache(self.ui, expecthash):
53 if not lfutil.insystemcache(self.ui, expecthash):
54 self.ui.warn(
54 self.ui.warn(
55 _('changeset %s: %s missing\n'
55 _('changeset %s: %s missing\n'
56 ' (looked for hash %s)\n')
56 ' (looked for hash %s)\n')
57 % (cset, filename, expecthash))
57 % (cset, filename, expecthash))
58 return True # failed
58 return True # failed
59
59
60 if contents:
60 if contents:
61 storepath = lfutil.systemcachepath(self.ui, expecthash)
61 storepath = lfutil.systemcachepath(self.ui, expecthash)
62 actualhash = lfutil.hashfile(storepath)
62 actualhash = lfutil.hashfile(storepath)
63 if actualhash != expecthash:
63 if actualhash != expecthash:
64 self.ui.warn(
64 self.ui.warn(
65 _('changeset %s: %s: contents differ\n'
65 _('changeset %s: %s: contents differ\n'
66 ' (%s:\n'
66 ' (%s:\n'
67 ' expected hash %s,\n'
67 ' expected hash %s,\n'
68 ' but got %s)\n')
68 ' but got %s)\n')
69 % (cset, filename, storepath, expecthash, actualhash))
69 % (cset, filename, storepath, expecthash, actualhash))
70 return True # failed
70 return True # failed
71 return False
71 return False
@@ -1,822 +1,824 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
15 archival, error, merge
15 archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19 import lfutil
19 import lfutil
20
20
21 try:
21 try:
22 from mercurial import scmutil
22 from mercurial import scmutil
23 except ImportError:
23 except ImportError:
24 pass
24 pass
25
25
26 import lfutil
26 import lfutil
27 import lfcommands
27 import lfcommands
28
28
29 def installnormalfilesmatchfn(manifest):
29 def installnormalfilesmatchfn(manifest):
30 '''overrides scmutil.match so that the matcher it returns will ignore all
30 '''overrides scmutil.match so that the matcher it returns will ignore all
31 largefiles'''
31 largefiles'''
32 oldmatch = None # for the closure
32 oldmatch = None # for the closure
33 def override_match(repo, pats=[], opts={}, globbed=False,
33 def override_match(repo, pats=[], opts={}, globbed=False,
34 default='relpath'):
34 default='relpath'):
35 match = oldmatch(repo, pats, opts, globbed, default)
35 match = oldmatch(repo, pats, opts, globbed, default)
36 m = copy.copy(match)
36 m = copy.copy(match)
37 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
37 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
38 manifest)
38 manifest)
39 m._files = filter(notlfile, m._files)
39 m._files = filter(notlfile, m._files)
40 m._fmap = set(m._files)
40 m._fmap = set(m._files)
41 orig_matchfn = m.matchfn
41 orig_matchfn = m.matchfn
42 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
42 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
43 return m
43 return m
44 oldmatch = installmatchfn(override_match)
44 oldmatch = installmatchfn(override_match)
45
45
46 def installmatchfn(f):
46 def installmatchfn(f):
47 oldmatch = scmutil.match
47 oldmatch = scmutil.match
48 setattr(f, 'oldmatch', oldmatch)
48 setattr(f, 'oldmatch', oldmatch)
49 scmutil.match = f
49 scmutil.match = f
50 return oldmatch
50 return oldmatch
51
51
52 def restorematchfn():
52 def restorematchfn():
53 '''restores scmutil.match to what it was before installnormalfilesmatchfn
53 '''restores scmutil.match to what it was before installnormalfilesmatchfn
54 was called. no-op if scmutil.match is its original function.
54 was called. no-op if scmutil.match is its original function.
55
55
56 Note that n calls to installnormalfilesmatchfn will require n calls to
56 Note that n calls to installnormalfilesmatchfn will require n calls to
57 restore matchfn to reverse'''
57 restore matchfn to reverse'''
58 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
58 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
59
59
60 # -- Wrappers: modify existing commands --------------------------------
60 # -- Wrappers: modify existing commands --------------------------------
61
61
62 # Add works by going through the files that the user wanted to add
62 # Add works by going through the files that the user wanted to add and
63 # and checking if they should be added as lfiles. Then making a new
63 # checking if they should be added as largefiles. Then it makes a new
64 # matcher which matches only the normal files and running the original
64 # matcher which matches only the normal files and runs the original
65 # version of add.
65 # version of add.
66 def override_add(orig, ui, repo, *pats, **opts):
66 def override_add(orig, ui, repo, *pats, **opts):
67 large = opts.pop('large', None)
67 large = opts.pop('large', None)
68 lfsize = lfutil.getminsize(
68 lfsize = lfutil.getminsize(
69 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
69 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
70
70
71 lfmatcher = None
71 lfmatcher = None
72 if os.path.exists(repo.wjoin(lfutil.shortname)):
72 if os.path.exists(repo.wjoin(lfutil.shortname)):
73 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
73 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
74 if lfpats:
74 if lfpats:
75 lfmatcher = match_.match(repo.root, '', list(lfpats))
75 lfmatcher = match_.match(repo.root, '', list(lfpats))
76
76
77 lfnames = []
77 lfnames = []
78 m = scmutil.match(repo[None], pats, opts)
78 m = scmutil.match(repo[None], pats, opts)
79 m.bad = lambda x, y: None
79 m.bad = lambda x, y: None
80 wctx = repo[None]
80 wctx = repo[None]
81 for f in repo.walk(m):
81 for f in repo.walk(m):
82 exact = m.exact(f)
82 exact = m.exact(f)
83 lfile = lfutil.standin(f) in wctx
83 lfile = lfutil.standin(f) in wctx
84 nfile = f in wctx
84 nfile = f in wctx
85 exists = lfile or nfile
85 exists = lfile or nfile
86
86
87 # Don't warn the user when they attempt to add a normal tracked file.
87 # Don't warn the user when they attempt to add a normal tracked file.
88 # The normal add code will do that for us.
88 # The normal add code will do that for us.
89 if exact and exists:
89 if exact and exists:
90 if lfile:
90 if lfile:
91 ui.warn(_('%s already a largefile\n') % f)
91 ui.warn(_('%s already a largefile\n') % f)
92 continue
92 continue
93
93
94 if exact or not exists:
94 if exact or not exists:
95 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
95 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
96 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
96 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
97 lfnames.append(f)
97 lfnames.append(f)
98 if ui.verbose or not exact:
98 if ui.verbose or not exact:
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100
100
101 bad = []
101 bad = []
102 standins = []
102 standins = []
103
103
104 # Need to lock otherwise there could be a race condition inbetween when
104 # Need to lock, otherwise there could be a race condition between
105 # standins are created and added to the repo
105 # when standins are created and added to the repo.
106 wlock = repo.wlock()
106 wlock = repo.wlock()
107 try:
107 try:
108 if not opts.get('dry_run'):
108 if not opts.get('dry_run'):
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 for f in lfnames:
110 for f in lfnames:
111 standinname = lfutil.standin(f)
111 standinname = lfutil.standin(f)
112 lfutil.writestandin(repo, standinname, hash='',
112 lfutil.writestandin(repo, standinname, hash='',
113 executable=lfutil.getexecutable(repo.wjoin(f)))
113 executable=lfutil.getexecutable(repo.wjoin(f)))
114 standins.append(standinname)
114 standins.append(standinname)
115 if lfdirstate[f] == 'r':
115 if lfdirstate[f] == 'r':
116 lfdirstate.normallookup(f)
116 lfdirstate.normallookup(f)
117 else:
117 else:
118 lfdirstate.add(f)
118 lfdirstate.add(f)
119 lfdirstate.write()
119 lfdirstate.write()
120 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
120 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
121 standins) if f in m.files()]
121 standins) if f in m.files()]
122 finally:
122 finally:
123 wlock.release()
123 wlock.release()
124
124
125 installnormalfilesmatchfn(repo[None].manifest())
125 installnormalfilesmatchfn(repo[None].manifest())
126 result = orig(ui, repo, *pats, **opts)
126 result = orig(ui, repo, *pats, **opts)
127 restorematchfn()
127 restorematchfn()
128
128
129 return (result == 1 or bad) and 1 or 0
129 return (result == 1 or bad) and 1 or 0
130
130
131 def override_remove(orig, ui, repo, *pats, **opts):
131 def override_remove(orig, ui, repo, *pats, **opts):
132 manifest = repo[None].manifest()
132 manifest = repo[None].manifest()
133 installnormalfilesmatchfn(manifest)
133 installnormalfilesmatchfn(manifest)
134 orig(ui, repo, *pats, **opts)
134 orig(ui, repo, *pats, **opts)
135 restorematchfn()
135 restorematchfn()
136
136
137 after, force = opts.get('after'), opts.get('force')
137 after, force = opts.get('after'), opts.get('force')
138 if not pats and not after:
138 if not pats and not after:
139 raise util.Abort(_('no files specified'))
139 raise util.Abort(_('no files specified'))
140 m = scmutil.match(repo[None], pats, opts)
140 m = scmutil.match(repo[None], pats, opts)
141 try:
141 try:
142 repo.lfstatus = True
142 repo.lfstatus = True
143 s = repo.status(match=m, clean=True)
143 s = repo.status(match=m, clean=True)
144 finally:
144 finally:
145 repo.lfstatus = False
145 repo.lfstatus = False
146 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
146 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
147 in manifest] for list in [s[0], s[1], s[3], s[6]]]
147 in manifest] for list in [s[0], s[1], s[3], s[6]]]
148
148
149 def warn(files, reason):
149 def warn(files, reason):
150 for f in files:
150 for f in files:
151 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
151 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
152 % (m.rel(f), reason))
152 % (m.rel(f), reason))
153
153
154 if force:
154 if force:
155 remove, forget = modified + deleted + clean, added
155 remove, forget = modified + deleted + clean, added
156 elif after:
156 elif after:
157 remove, forget = deleted, []
157 remove, forget = deleted, []
158 warn(modified + added + clean, _('still exists'))
158 warn(modified + added + clean, _('still exists'))
159 else:
159 else:
160 remove, forget = deleted + clean, []
160 remove, forget = deleted + clean, []
161 warn(modified, _('is modified'))
161 warn(modified, _('is modified'))
162 warn(added, _('has been marked for add'))
162 warn(added, _('has been marked for add'))
163
163
164 for f in sorted(remove + forget):
164 for f in sorted(remove + forget):
165 if ui.verbose or not m.exact(f):
165 if ui.verbose or not m.exact(f):
166 ui.status(_('removing %s\n') % m.rel(f))
166 ui.status(_('removing %s\n') % m.rel(f))
167
167
168 # Need to lock because standin files are deleted then removed from the
168 # Need to lock because standin files are deleted then removed from the
169 # repository and we could race inbetween.
169 # repository and we could race inbetween.
170 wlock = repo.wlock()
170 wlock = repo.wlock()
171 try:
171 try:
172 lfdirstate = lfutil.openlfdirstate(ui, repo)
172 lfdirstate = lfutil.openlfdirstate(ui, repo)
173 for f in remove:
173 for f in remove:
174 if not after:
174 if not after:
175 os.unlink(repo.wjoin(f))
175 os.unlink(repo.wjoin(f))
176 currentdir = os.path.split(f)[0]
176 currentdir = os.path.split(f)[0]
177 while currentdir and not os.listdir(repo.wjoin(currentdir)):
177 while currentdir and not os.listdir(repo.wjoin(currentdir)):
178 os.rmdir(repo.wjoin(currentdir))
178 os.rmdir(repo.wjoin(currentdir))
179 currentdir = os.path.split(currentdir)[0]
179 currentdir = os.path.split(currentdir)[0]
180 lfdirstate.remove(f)
180 lfdirstate.remove(f)
181 lfdirstate.write()
181 lfdirstate.write()
182
182
183 forget = [lfutil.standin(f) for f in forget]
183 forget = [lfutil.standin(f) for f in forget]
184 remove = [lfutil.standin(f) for f in remove]
184 remove = [lfutil.standin(f) for f in remove]
185 lfutil.repo_forget(repo, forget)
185 lfutil.repo_forget(repo, forget)
186 lfutil.repo_remove(repo, remove, unlink=True)
186 lfutil.repo_remove(repo, remove, unlink=True)
187 finally:
187 finally:
188 wlock.release()
188 wlock.release()
189
189
190 def override_status(orig, ui, repo, *pats, **opts):
190 def override_status(orig, ui, repo, *pats, **opts):
191 try:
191 try:
192 repo.lfstatus = True
192 repo.lfstatus = True
193 return orig(ui, repo, *pats, **opts)
193 return orig(ui, repo, *pats, **opts)
194 finally:
194 finally:
195 repo.lfstatus = False
195 repo.lfstatus = False
196
196
197 def override_log(orig, ui, repo, *pats, **opts):
197 def override_log(orig, ui, repo, *pats, **opts):
198 try:
198 try:
199 repo.lfstatus = True
199 repo.lfstatus = True
200 orig(ui, repo, *pats, **opts)
200 orig(ui, repo, *pats, **opts)
201 finally:
201 finally:
202 repo.lfstatus = False
202 repo.lfstatus = False
203
203
204 def override_verify(orig, ui, repo, *pats, **opts):
204 def override_verify(orig, ui, repo, *pats, **opts):
205 large = opts.pop('large', False)
205 large = opts.pop('large', False)
206 all = opts.pop('lfa', False)
206 all = opts.pop('lfa', False)
207 contents = opts.pop('lfc', False)
207 contents = opts.pop('lfc', False)
208
208
209 result = orig(ui, repo, *pats, **opts)
209 result = orig(ui, repo, *pats, **opts)
210 if large:
210 if large:
211 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
211 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
212 return result
212 return result
213
213
214 # Override needs to refresh standins so that update's normal merge
214 # Override needs to refresh standins so that update's normal merge
215 # will go through properly. Then the other update hook (overriding repo.update)
215 # will go through properly. Then the other update hook (overriding repo.update)
216 # will get the new files. Filemerge is also overriden so that the merge
216 # will get the new files. Filemerge is also overriden so that the merge
217 # will merge standins correctly.
217 # will merge standins correctly.
218 def override_update(orig, ui, repo, *pats, **opts):
218 def override_update(orig, ui, repo, *pats, **opts):
219 lfdirstate = lfutil.openlfdirstate(ui, repo)
219 lfdirstate = lfutil.openlfdirstate(ui, repo)
220 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
220 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
221 False, False)
221 False, False)
222 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
222 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
223
223
224 # Need to lock between the standins getting updated and their lfiles
224 # Need to lock between the standins getting updated and their
225 # getting updated
225 # largefiles getting updated
226 wlock = repo.wlock()
226 wlock = repo.wlock()
227 try:
227 try:
228 if opts['check']:
228 if opts['check']:
229 mod = len(modified) > 0
229 mod = len(modified) > 0
230 for lfile in unsure:
230 for lfile in unsure:
231 standin = lfutil.standin(lfile)
231 standin = lfutil.standin(lfile)
232 if repo['.'][standin].data().strip() != \
232 if repo['.'][standin].data().strip() != \
233 lfutil.hashfile(repo.wjoin(lfile)):
233 lfutil.hashfile(repo.wjoin(lfile)):
234 mod = True
234 mod = True
235 else:
235 else:
236 lfdirstate.normal(lfile)
236 lfdirstate.normal(lfile)
237 lfdirstate.write()
237 lfdirstate.write()
238 if mod:
238 if mod:
239 raise util.Abort(_('uncommitted local changes'))
239 raise util.Abort(_('uncommitted local changes'))
240 # XXX handle removed differently
240 # XXX handle removed differently
241 if not opts['clean']:
241 if not opts['clean']:
242 for lfile in unsure + modified + added:
242 for lfile in unsure + modified + added:
243 lfutil.updatestandin(repo, lfutil.standin(lfile))
243 lfutil.updatestandin(repo, lfutil.standin(lfile))
244 finally:
244 finally:
245 wlock.release()
245 wlock.release()
246 return orig(ui, repo, *pats, **opts)
246 return orig(ui, repo, *pats, **opts)
247
247
248 # Override filemerge to prompt the user about how they wish to merge lfiles.
248 # Override filemerge to prompt the user about how they wish to merge
249 # This will handle identical edits, and copy/rename + edit without prompting
249 # largefiles. This will handle identical edits, and copy/rename +
250 # the user.
250 # edit without prompting the user.
251 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
251 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
252 # Use better variable names here. Because this is a wrapper we cannot
252 # Use better variable names here. Because this is a wrapper we cannot
253 # change the variable names in the function declaration.
253 # change the variable names in the function declaration.
254 fcdest, fcother, fcancestor = fcd, fco, fca
254 fcdest, fcother, fcancestor = fcd, fco, fca
255 if not lfutil.isstandin(orig):
255 if not lfutil.isstandin(orig):
256 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
256 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
257 else:
257 else:
258 if not fcother.cmp(fcdest): # files identical?
258 if not fcother.cmp(fcdest): # files identical?
259 return None
259 return None
260
260
261 # backwards, use working dir parent as ancestor
261 # backwards, use working dir parent as ancestor
262 if fcancestor == fcother:
262 if fcancestor == fcother:
263 fcancestor = fcdest.parents()[0]
263 fcancestor = fcdest.parents()[0]
264
264
265 if orig != fcother.path():
265 if orig != fcother.path():
266 repo.ui.status(_('merging %s and %s to %s\n')
266 repo.ui.status(_('merging %s and %s to %s\n')
267 % (lfutil.splitstandin(orig),
267 % (lfutil.splitstandin(orig),
268 lfutil.splitstandin(fcother.path()),
268 lfutil.splitstandin(fcother.path()),
269 lfutil.splitstandin(fcdest.path())))
269 lfutil.splitstandin(fcdest.path())))
270 else:
270 else:
271 repo.ui.status(_('merging %s\n')
271 repo.ui.status(_('merging %s\n')
272 % lfutil.splitstandin(fcdest.path()))
272 % lfutil.splitstandin(fcdest.path()))
273
273
274 if fcancestor.path() != fcother.path() and fcother.data() == \
274 if fcancestor.path() != fcother.path() and fcother.data() == \
275 fcancestor.data():
275 fcancestor.data():
276 return 0
276 return 0
277 if fcancestor.path() != fcdest.path() and fcdest.data() == \
277 if fcancestor.path() != fcdest.path() and fcdest.data() == \
278 fcancestor.data():
278 fcancestor.data():
279 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
279 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
280 return 0
280 return 0
281
281
282 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
282 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
283 'keep (l)ocal or take (o)ther?') %
283 'keep (l)ocal or take (o)ther?') %
284 lfutil.splitstandin(orig),
284 lfutil.splitstandin(orig),
285 (_('&Local'), _('&Other')), 0) == 0:
285 (_('&Local'), _('&Other')), 0) == 0:
286 return 0
286 return 0
287 else:
287 else:
288 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
288 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
289 return 0
289 return 0
290
290
291 # Copy first changes the matchers to match standins instead of lfiles.
291 # Copy first changes the matchers to match standins instead of
292 # Then it overrides util.copyfile in that function it checks if the destination
292 # largefiles. Then it overrides util.copyfile in that function it
293 # lfile already exists. It also keeps a list of copied files so that the lfiles
293 # checks if the destination largefile already exists. It also keeps a
294 # can be copied and the dirstate updated.
294 # list of copied files so that the largefiles can be copied and the
295 # dirstate updated.
295 def override_copy(orig, ui, repo, pats, opts, rename=False):
296 def override_copy(orig, ui, repo, pats, opts, rename=False):
296 # doesn't remove lfile on rename
297 # doesn't remove largefile on rename
297 if len(pats) < 2:
298 if len(pats) < 2:
298 # this isn't legal, let the original function deal with it
299 # this isn't legal, let the original function deal with it
299 return orig(ui, repo, pats, opts, rename)
300 return orig(ui, repo, pats, opts, rename)
300
301
301 def makestandin(relpath):
302 def makestandin(relpath):
302 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
303 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
303 return os.path.join(os.path.relpath('.', repo.getcwd()),
304 return os.path.join(os.path.relpath('.', repo.getcwd()),
304 lfutil.standin(path))
305 lfutil.standin(path))
305
306
306 fullpats = scmutil.expandpats(pats)
307 fullpats = scmutil.expandpats(pats)
307 dest = fullpats[-1]
308 dest = fullpats[-1]
308
309
309 if os.path.isdir(dest):
310 if os.path.isdir(dest):
310 if not os.path.isdir(makestandin(dest)):
311 if not os.path.isdir(makestandin(dest)):
311 os.makedirs(makestandin(dest))
312 os.makedirs(makestandin(dest))
312 # This could copy both lfiles and normal files in one command, but we don't
313 # This could copy both largefiles and normal files in one command,
313 # want to do that first replace their matcher to only match normal files
314 # but we don't want to do that first replace their matcher to only
314 # and run it then replace it to just match lfiles and run it again
315 # match normal files and run it then replace it to just match
316 # lfiles and run it again
315 nonormalfiles = False
317 nonormalfiles = False
316 nolfiles = False
318 nolfiles = False
317 try:
319 try:
318 installnormalfilesmatchfn(repo[None].manifest())
320 installnormalfilesmatchfn(repo[None].manifest())
319 result = orig(ui, repo, pats, opts, rename)
321 result = orig(ui, repo, pats, opts, rename)
320 except util.Abort, e:
322 except util.Abort, e:
321 if str(e) != 'no files to copy':
323 if str(e) != 'no files to copy':
322 raise e
324 raise e
323 else:
325 else:
324 nonormalfiles = True
326 nonormalfiles = True
325 result = 0
327 result = 0
326 finally:
328 finally:
327 restorematchfn()
329 restorematchfn()
328
330
329 # The first rename can cause our current working directory to be removed.
331 # The first rename can cause our current working directory to be removed.
330 # In that case there is nothing left to copy/rename so just quit.
332 # In that case there is nothing left to copy/rename so just quit.
331 try:
333 try:
332 repo.getcwd()
334 repo.getcwd()
333 except OSError:
335 except OSError:
334 return result
336 return result
335
337
336 try:
338 try:
337 # When we call orig below it creates the standins but we don't add them
339 # When we call orig below it creates the standins but we don't add them
338 # to the dir state until later so lock during that time.
340 # to the dir state until later so lock during that time.
339 wlock = repo.wlock()
341 wlock = repo.wlock()
340
342
341 manifest = repo[None].manifest()
343 manifest = repo[None].manifest()
342 oldmatch = None # for the closure
344 oldmatch = None # for the closure
343 def override_match(repo, pats=[], opts={}, globbed=False,
345 def override_match(repo, pats=[], opts={}, globbed=False,
344 default='relpath'):
346 default='relpath'):
345 newpats = []
347 newpats = []
346 # The patterns were previously mangled to add the standin
348 # The patterns were previously mangled to add the standin
347 # directory; we need to remove that now
349 # directory; we need to remove that now
348 for pat in pats:
350 for pat in pats:
349 if match_.patkind(pat) is None and lfutil.shortname in pat:
351 if match_.patkind(pat) is None and lfutil.shortname in pat:
350 newpats.append(pat.replace(lfutil.shortname, ''))
352 newpats.append(pat.replace(lfutil.shortname, ''))
351 else:
353 else:
352 newpats.append(pat)
354 newpats.append(pat)
353 match = oldmatch(repo, newpats, opts, globbed, default)
355 match = oldmatch(repo, newpats, opts, globbed, default)
354 m = copy.copy(match)
356 m = copy.copy(match)
355 lfile = lambda f: lfutil.standin(f) in manifest
357 lfile = lambda f: lfutil.standin(f) in manifest
356 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
358 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
357 m._fmap = set(m._files)
359 m._fmap = set(m._files)
358 orig_matchfn = m.matchfn
360 orig_matchfn = m.matchfn
359 m.matchfn = lambda f: lfutil.isstandin(f) and \
361 m.matchfn = lambda f: lfutil.isstandin(f) and \
360 lfile(lfutil.splitstandin(f)) and \
362 lfile(lfutil.splitstandin(f)) and \
361 orig_matchfn(lfutil.splitstandin(f)) or None
363 orig_matchfn(lfutil.splitstandin(f)) or None
362 return m
364 return m
363 oldmatch = installmatchfn(override_match)
365 oldmatch = installmatchfn(override_match)
364 listpats = []
366 listpats = []
365 for pat in pats:
367 for pat in pats:
366 if match_.patkind(pat) is not None:
368 if match_.patkind(pat) is not None:
367 listpats.append(pat)
369 listpats.append(pat)
368 else:
370 else:
369 listpats.append(makestandin(pat))
371 listpats.append(makestandin(pat))
370
372
371 try:
373 try:
372 origcopyfile = util.copyfile
374 origcopyfile = util.copyfile
373 copiedfiles = []
375 copiedfiles = []
374 def override_copyfile(src, dest):
376 def override_copyfile(src, dest):
375 if lfutil.shortname in src and lfutil.shortname in dest:
377 if lfutil.shortname in src and lfutil.shortname in dest:
376 destlfile = dest.replace(lfutil.shortname, '')
378 destlfile = dest.replace(lfutil.shortname, '')
377 if not opts['force'] and os.path.exists(destlfile):
379 if not opts['force'] and os.path.exists(destlfile):
378 raise IOError('',
380 raise IOError('',
379 _('destination largefile already exists'))
381 _('destination largefile already exists'))
380 copiedfiles.append((src, dest))
382 copiedfiles.append((src, dest))
381 origcopyfile(src, dest)
383 origcopyfile(src, dest)
382
384
383 util.copyfile = override_copyfile
385 util.copyfile = override_copyfile
384 result += orig(ui, repo, listpats, opts, rename)
386 result += orig(ui, repo, listpats, opts, rename)
385 finally:
387 finally:
386 util.copyfile = origcopyfile
388 util.copyfile = origcopyfile
387
389
388 lfdirstate = lfutil.openlfdirstate(ui, repo)
390 lfdirstate = lfutil.openlfdirstate(ui, repo)
389 for (src, dest) in copiedfiles:
391 for (src, dest) in copiedfiles:
390 if lfutil.shortname in src and lfutil.shortname in dest:
392 if lfutil.shortname in src and lfutil.shortname in dest:
391 srclfile = src.replace(lfutil.shortname, '')
393 srclfile = src.replace(lfutil.shortname, '')
392 destlfile = dest.replace(lfutil.shortname, '')
394 destlfile = dest.replace(lfutil.shortname, '')
393 destlfiledir = os.path.dirname(destlfile) or '.'
395 destlfiledir = os.path.dirname(destlfile) or '.'
394 if not os.path.isdir(destlfiledir):
396 if not os.path.isdir(destlfiledir):
395 os.makedirs(destlfiledir)
397 os.makedirs(destlfiledir)
396 if rename:
398 if rename:
397 os.rename(srclfile, destlfile)
399 os.rename(srclfile, destlfile)
398 lfdirstate.remove(os.path.relpath(srclfile,
400 lfdirstate.remove(os.path.relpath(srclfile,
399 repo.root))
401 repo.root))
400 else:
402 else:
401 util.copyfile(srclfile, destlfile)
403 util.copyfile(srclfile, destlfile)
402 lfdirstate.add(os.path.relpath(destlfile,
404 lfdirstate.add(os.path.relpath(destlfile,
403 repo.root))
405 repo.root))
404 lfdirstate.write()
406 lfdirstate.write()
405 except util.Abort, e:
407 except util.Abort, e:
406 if str(e) != 'no files to copy':
408 if str(e) != 'no files to copy':
407 raise e
409 raise e
408 else:
410 else:
409 nolfiles = True
411 nolfiles = True
410 finally:
412 finally:
411 restorematchfn()
413 restorematchfn()
412 wlock.release()
414 wlock.release()
413
415
414 if nolfiles and nonormalfiles:
416 if nolfiles and nonormalfiles:
415 raise util.Abort(_('no files to copy'))
417 raise util.Abort(_('no files to copy'))
416
418
417 return result
419 return result
418
420
419 # When the user calls revert, we have to be careful to not revert any changes
421 # When the user calls revert, we have to be careful to not revert any changes
420 # to other lfiles accidentally. This means we have to keep track of the lfiles
422 # to other lfiles accidentally. This means we have to keep track of the lfiles
421 # that are being reverted so we only pull down the necessary lfiles.
423 # that are being reverted so we only pull down the necessary lfiles.
422 #
424 #
423 # Standins are only updated (to match the hash of lfiles) before commits.
425 # Standins are only updated (to match the hash of lfiles) before commits.
424 # Update the standins then run the original revert (changing the matcher to hit
426 # Update the standins then run the original revert (changing the matcher to hit
425 # standins instead of lfiles). Based on the resulting standins update the
427 # standins instead of lfiles). Based on the resulting standins update the
426 # lfiles. Then return the standins to their proper state
428 # lfiles. Then return the standins to their proper state
427 def override_revert(orig, ui, repo, *pats, **opts):
429 def override_revert(orig, ui, repo, *pats, **opts):
428 # Because we put the standins in a bad state (by updating them) and then
430 # Because we put the standins in a bad state (by updating them) and then
429 # return them to a correct state we need to lock to prevent others from
431 # return them to a correct state we need to lock to prevent others from
430 # changing them in their incorrect state.
432 # changing them in their incorrect state.
431 wlock = repo.wlock()
433 wlock = repo.wlock()
432 try:
434 try:
433 lfdirstate = lfutil.openlfdirstate(ui, repo)
435 lfdirstate = lfutil.openlfdirstate(ui, repo)
434 (modified, added, removed, missing, unknown, ignored, clean) = \
436 (modified, added, removed, missing, unknown, ignored, clean) = \
435 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
437 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
436 for lfile in modified:
438 for lfile in modified:
437 lfutil.updatestandin(repo, lfutil.standin(lfile))
439 lfutil.updatestandin(repo, lfutil.standin(lfile))
438
440
439 try:
441 try:
440 ctx = repo[opts.get('rev')]
442 ctx = repo[opts.get('rev')]
441 oldmatch = None # for the closure
443 oldmatch = None # for the closure
442 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
444 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
443 default='relpath'):
445 default='relpath'):
444 if util.safehasattr(ctxorrepo, 'match'):
446 if util.safehasattr(ctxorrepo, 'match'):
445 ctx0 = ctxorrepo
447 ctx0 = ctxorrepo
446 else:
448 else:
447 ctx0 = ctxorrepo[None]
449 ctx0 = ctxorrepo[None]
448 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
450 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
449 m = copy.copy(match)
451 m = copy.copy(match)
450 def tostandin(f):
452 def tostandin(f):
451 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
453 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
452 return lfutil.standin(f)
454 return lfutil.standin(f)
453 elif lfutil.standin(f) in repo[None]:
455 elif lfutil.standin(f) in repo[None]:
454 return None
456 return None
455 return f
457 return f
456 m._files = [tostandin(f) for f in m._files]
458 m._files = [tostandin(f) for f in m._files]
457 m._files = [f for f in m._files if f is not None]
459 m._files = [f for f in m._files if f is not None]
458 m._fmap = set(m._files)
460 m._fmap = set(m._files)
459 orig_matchfn = m.matchfn
461 orig_matchfn = m.matchfn
460 def matchfn(f):
462 def matchfn(f):
461 if lfutil.isstandin(f):
463 if lfutil.isstandin(f):
462 # We need to keep track of what lfiles are being
464 # We need to keep track of what lfiles are being
463 # matched so we know which ones to update later
465 # matched so we know which ones to update later
464 # (otherwise we revert changes to other lfiles
466 # (otherwise we revert changes to other lfiles
465 # accidentally). This is repo specific, so duckpunch
467 # accidentally). This is repo specific, so duckpunch
466 # the repo object to keep the list of lfiles for us
468 # the repo object to keep the list of lfiles for us
467 # later.
469 # later.
468 if orig_matchfn(lfutil.splitstandin(f)) and \
470 if orig_matchfn(lfutil.splitstandin(f)) and \
469 (f in repo[None] or f in ctx):
471 (f in repo[None] or f in ctx):
470 lfileslist = getattr(repo, '_lfilestoupdate', [])
472 lfileslist = getattr(repo, '_lfilestoupdate', [])
471 lfileslist.append(lfutil.splitstandin(f))
473 lfileslist.append(lfutil.splitstandin(f))
472 repo._lfilestoupdate = lfileslist
474 repo._lfilestoupdate = lfileslist
473 return True
475 return True
474 else:
476 else:
475 return False
477 return False
476 return orig_matchfn(f)
478 return orig_matchfn(f)
477 m.matchfn = matchfn
479 m.matchfn = matchfn
478 return m
480 return m
479 oldmatch = installmatchfn(override_match)
481 oldmatch = installmatchfn(override_match)
480 scmutil.match
482 scmutil.match
481 matches = override_match(repo[None], pats, opts)
483 matches = override_match(repo[None], pats, opts)
482 orig(ui, repo, *pats, **opts)
484 orig(ui, repo, *pats, **opts)
483 finally:
485 finally:
484 restorematchfn()
486 restorematchfn()
485 lfileslist = getattr(repo, '_lfilestoupdate', [])
487 lfileslist = getattr(repo, '_lfilestoupdate', [])
486 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
488 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
487 printmessage=False)
489 printmessage=False)
488 # Empty out the lfiles list so we start fresh next time
490 # Empty out the lfiles list so we start fresh next time
489 repo._lfilestoupdate = []
491 repo._lfilestoupdate = []
490 for lfile in modified:
492 for lfile in modified:
491 if lfile in lfileslist:
493 if lfile in lfileslist:
492 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
494 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
493 in repo['.']:
495 in repo['.']:
494 lfutil.writestandin(repo, lfutil.standin(lfile),
496 lfutil.writestandin(repo, lfutil.standin(lfile),
495 repo['.'][lfile].data().strip(),
497 repo['.'][lfile].data().strip(),
496 'x' in repo['.'][lfile].flags())
498 'x' in repo['.'][lfile].flags())
497 lfdirstate = lfutil.openlfdirstate(ui, repo)
499 lfdirstate = lfutil.openlfdirstate(ui, repo)
498 for lfile in added:
500 for lfile in added:
499 standin = lfutil.standin(lfile)
501 standin = lfutil.standin(lfile)
500 if standin not in ctx and (standin in matches or opts.get('all')):
502 if standin not in ctx and (standin in matches or opts.get('all')):
501 if lfile in lfdirstate:
503 if lfile in lfdirstate:
502 lfdirstate.drop(lfile)
504 lfdirstate.drop(lfile)
503 util.unlinkpath(repo.wjoin(standin))
505 util.unlinkpath(repo.wjoin(standin))
504 lfdirstate.write()
506 lfdirstate.write()
505 finally:
507 finally:
506 wlock.release()
508 wlock.release()
507
509
508 def hg_update(orig, repo, node):
510 def hg_update(orig, repo, node):
509 result = orig(repo, node)
511 result = orig(repo, node)
510 # XXX check if it worked first
512 # XXX check if it worked first
511 lfcommands.updatelfiles(repo.ui, repo)
513 lfcommands.updatelfiles(repo.ui, repo)
512 return result
514 return result
513
515
514 def hg_clean(orig, repo, node, show_stats=True):
516 def hg_clean(orig, repo, node, show_stats=True):
515 result = orig(repo, node, show_stats)
517 result = orig(repo, node, show_stats)
516 lfcommands.updatelfiles(repo.ui, repo)
518 lfcommands.updatelfiles(repo.ui, repo)
517 return result
519 return result
518
520
519 def hg_merge(orig, repo, node, force=None, remind=True):
521 def hg_merge(orig, repo, node, force=None, remind=True):
520 result = orig(repo, node, force, remind)
522 result = orig(repo, node, force, remind)
521 lfcommands.updatelfiles(repo.ui, repo)
523 lfcommands.updatelfiles(repo.ui, repo)
522 return result
524 return result
523
525
524 # When we rebase a repository with remotely changed lfiles, we need
526 # When we rebase a repository with remotely changed lfiles, we need
525 # to take some extra care so that the lfiles are correctly updated
527 # to take some extra care so that the lfiles are correctly updated
526 # in the working copy
528 # in the working copy
527 def override_pull(orig, ui, repo, source=None, **opts):
529 def override_pull(orig, ui, repo, source=None, **opts):
528 if opts.get('rebase', False):
530 if opts.get('rebase', False):
529 repo._isrebasing = True
531 repo._isrebasing = True
530 try:
532 try:
531 if opts.get('update'):
533 if opts.get('update'):
532 del opts['update']
534 del opts['update']
533 ui.debug('--update and --rebase are not compatible, ignoring '
535 ui.debug('--update and --rebase are not compatible, ignoring '
534 'the update flag\n')
536 'the update flag\n')
535 del opts['rebase']
537 del opts['rebase']
536 cmdutil.bailifchanged(repo)
538 cmdutil.bailifchanged(repo)
537 revsprepull = len(repo)
539 revsprepull = len(repo)
538 origpostincoming = commands.postincoming
540 origpostincoming = commands.postincoming
539 def _dummy(*args, **kwargs):
541 def _dummy(*args, **kwargs):
540 pass
542 pass
541 commands.postincoming = _dummy
543 commands.postincoming = _dummy
542 repo.lfpullsource = source
544 repo.lfpullsource = source
543 if not source:
545 if not source:
544 source = 'default'
546 source = 'default'
545 try:
547 try:
546 result = commands.pull(ui, repo, source, **opts)
548 result = commands.pull(ui, repo, source, **opts)
547 finally:
549 finally:
548 commands.postincoming = origpostincoming
550 commands.postincoming = origpostincoming
549 revspostpull = len(repo)
551 revspostpull = len(repo)
550 if revspostpull > revsprepull:
552 if revspostpull > revsprepull:
551 result = result or rebase.rebase(ui, repo)
553 result = result or rebase.rebase(ui, repo)
552 finally:
554 finally:
553 repo._isrebasing = False
555 repo._isrebasing = False
554 else:
556 else:
555 repo.lfpullsource = source
557 repo.lfpullsource = source
556 if not source:
558 if not source:
557 source = 'default'
559 source = 'default'
558 result = orig(ui, repo, source, **opts)
560 result = orig(ui, repo, source, **opts)
559 return result
561 return result
560
562
561 def override_rebase(orig, ui, repo, **opts):
563 def override_rebase(orig, ui, repo, **opts):
562 repo._isrebasing = True
564 repo._isrebasing = True
563 try:
565 try:
564 orig(ui, repo, **opts)
566 orig(ui, repo, **opts)
565 finally:
567 finally:
566 repo._isrebasing = False
568 repo._isrebasing = False
567
569
568 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
570 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
569 prefix=None, mtime=None, subrepos=None):
571 prefix=None, mtime=None, subrepos=None):
570 # No need to lock because we are only reading history and lfile caches
572 # No need to lock because we are only reading history and lfile caches
571 # neither of which are modified
573 # neither of which are modified
572
574
573 lfcommands.cachelfiles(repo.ui, repo, node)
575 lfcommands.cachelfiles(repo.ui, repo, node)
574
576
575 if kind not in archival.archivers:
577 if kind not in archival.archivers:
576 raise util.Abort(_("unknown archive type '%s'") % kind)
578 raise util.Abort(_("unknown archive type '%s'") % kind)
577
579
578 ctx = repo[node]
580 ctx = repo[node]
579
581
580 if kind == 'files':
582 if kind == 'files':
581 if prefix:
583 if prefix:
582 raise util.Abort(
584 raise util.Abort(
583 _('cannot give prefix when archiving to files'))
585 _('cannot give prefix when archiving to files'))
584 else:
586 else:
585 prefix = archival.tidyprefix(dest, kind, prefix)
587 prefix = archival.tidyprefix(dest, kind, prefix)
586
588
587 def write(name, mode, islink, getdata):
589 def write(name, mode, islink, getdata):
588 if matchfn and not matchfn(name):
590 if matchfn and not matchfn(name):
589 return
591 return
590 data = getdata()
592 data = getdata()
591 if decode:
593 if decode:
592 data = repo.wwritedata(name, data)
594 data = repo.wwritedata(name, data)
593 archiver.addfile(prefix + name, mode, islink, data)
595 archiver.addfile(prefix + name, mode, islink, data)
594
596
595 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
597 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
596
598
597 if repo.ui.configbool("ui", "archivemeta", True):
599 if repo.ui.configbool("ui", "archivemeta", True):
598 def metadata():
600 def metadata():
599 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
601 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
600 hex(repo.changelog.node(0)), hex(node), ctx.branch())
602 hex(repo.changelog.node(0)), hex(node), ctx.branch())
601
603
602 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
604 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
603 if repo.tagtype(t) == 'global')
605 if repo.tagtype(t) == 'global')
604 if not tags:
606 if not tags:
605 repo.ui.pushbuffer()
607 repo.ui.pushbuffer()
606 opts = {'template': '{latesttag}\n{latesttagdistance}',
608 opts = {'template': '{latesttag}\n{latesttagdistance}',
607 'style': '', 'patch': None, 'git': None}
609 'style': '', 'patch': None, 'git': None}
608 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
610 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
609 ltags, dist = repo.ui.popbuffer().split('\n')
611 ltags, dist = repo.ui.popbuffer().split('\n')
610 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
612 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
611 tags += 'latesttagdistance: %s\n' % dist
613 tags += 'latesttagdistance: %s\n' % dist
612
614
613 return base + tags
615 return base + tags
614
616
615 write('.hg_archival.txt', 0644, False, metadata)
617 write('.hg_archival.txt', 0644, False, metadata)
616
618
617 for f in ctx:
619 for f in ctx:
618 ff = ctx.flags(f)
620 ff = ctx.flags(f)
619 getdata = ctx[f].data
621 getdata = ctx[f].data
620 if lfutil.isstandin(f):
622 if lfutil.isstandin(f):
621 path = lfutil.findfile(repo, getdata().strip())
623 path = lfutil.findfile(repo, getdata().strip())
622 f = lfutil.splitstandin(f)
624 f = lfutil.splitstandin(f)
623
625
624 def getdatafn():
626 def getdatafn():
625 try:
627 try:
626 fd = open(path, 'rb')
628 fd = open(path, 'rb')
627 return fd.read()
629 return fd.read()
628 finally:
630 finally:
629 fd.close()
631 fd.close()
630
632
631 getdata = getdatafn
633 getdata = getdatafn
632 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
634 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
633
635
634 if subrepos:
636 if subrepos:
635 for subpath in ctx.substate:
637 for subpath in ctx.substate:
636 sub = ctx.sub(subpath)
638 sub = ctx.sub(subpath)
637 try:
639 try:
638 sub.archive(repo.ui, archiver, prefix)
640 sub.archive(repo.ui, archiver, prefix)
639 except TypeError:
641 except TypeError:
640 sub.archive(archiver, prefix)
642 sub.archive(archiver, prefix)
641
643
642 archiver.done()
644 archiver.done()
643
645
644 # If a lfile is modified the change is not reflected in its standin until a
646 # If a lfile is modified the change is not reflected in its standin until a
645 # commit. cmdutil.bailifchanged raises an exception if the repo has
647 # commit. cmdutil.bailifchanged raises an exception if the repo has
646 # uncommitted changes. Wrap it to also check if lfiles were changed. This is
648 # uncommitted changes. Wrap it to also check if lfiles were changed. This is
647 # used by bisect and backout.
649 # used by bisect and backout.
648 def override_bailifchanged(orig, repo):
650 def override_bailifchanged(orig, repo):
649 orig(repo)
651 orig(repo)
650 repo.lfstatus = True
652 repo.lfstatus = True
651 modified, added, removed, deleted = repo.status()[:4]
653 modified, added, removed, deleted = repo.status()[:4]
652 repo.lfstatus = False
654 repo.lfstatus = False
653 if modified or added or removed or deleted:
655 if modified or added or removed or deleted:
654 raise util.Abort(_('outstanding uncommitted changes'))
656 raise util.Abort(_('outstanding uncommitted changes'))
655
657
656 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
658 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
657 def override_fetch(orig, ui, repo, *pats, **opts):
659 def override_fetch(orig, ui, repo, *pats, **opts):
658 repo.lfstatus = True
660 repo.lfstatus = True
659 modified, added, removed, deleted = repo.status()[:4]
661 modified, added, removed, deleted = repo.status()[:4]
660 repo.lfstatus = False
662 repo.lfstatus = False
661 if modified or added or removed or deleted:
663 if modified or added or removed or deleted:
662 raise util.Abort(_('outstanding uncommitted changes'))
664 raise util.Abort(_('outstanding uncommitted changes'))
663 return orig(ui, repo, *pats, **opts)
665 return orig(ui, repo, *pats, **opts)
664
666
665 def override_forget(orig, ui, repo, *pats, **opts):
667 def override_forget(orig, ui, repo, *pats, **opts):
666 installnormalfilesmatchfn(repo[None].manifest())
668 installnormalfilesmatchfn(repo[None].manifest())
667 orig(ui, repo, *pats, **opts)
669 orig(ui, repo, *pats, **opts)
668 restorematchfn()
670 restorematchfn()
669 m = scmutil.match(repo[None], pats, opts)
671 m = scmutil.match(repo[None], pats, opts)
670
672
671 try:
673 try:
672 repo.lfstatus = True
674 repo.lfstatus = True
673 s = repo.status(match=m, clean=True)
675 s = repo.status(match=m, clean=True)
674 finally:
676 finally:
675 repo.lfstatus = False
677 repo.lfstatus = False
676 forget = sorted(s[0] + s[1] + s[3] + s[6])
678 forget = sorted(s[0] + s[1] + s[3] + s[6])
677 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
679 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
678
680
679 for f in forget:
681 for f in forget:
680 if lfutil.standin(f) not in repo.dirstate and not \
682 if lfutil.standin(f) not in repo.dirstate and not \
681 os.path.isdir(m.rel(lfutil.standin(f))):
683 os.path.isdir(m.rel(lfutil.standin(f))):
682 ui.warn(_('not removing %s: file is already untracked\n')
684 ui.warn(_('not removing %s: file is already untracked\n')
683 % m.rel(f))
685 % m.rel(f))
684
686
685 for f in forget:
687 for f in forget:
686 if ui.verbose or not m.exact(f):
688 if ui.verbose or not m.exact(f):
687 ui.status(_('removing %s\n') % m.rel(f))
689 ui.status(_('removing %s\n') % m.rel(f))
688
690
689 # Need to lock because standin files are deleted then removed from the
691 # Need to lock because standin files are deleted then removed from the
690 # repository and we could race inbetween.
692 # repository and we could race inbetween.
691 wlock = repo.wlock()
693 wlock = repo.wlock()
692 try:
694 try:
693 lfdirstate = lfutil.openlfdirstate(ui, repo)
695 lfdirstate = lfutil.openlfdirstate(ui, repo)
694 for f in forget:
696 for f in forget:
695 if lfdirstate[f] == 'a':
697 if lfdirstate[f] == 'a':
696 lfdirstate.drop(f)
698 lfdirstate.drop(f)
697 else:
699 else:
698 lfdirstate.remove(f)
700 lfdirstate.remove(f)
699 lfdirstate.write()
701 lfdirstate.write()
700 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
702 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
701 unlink=True)
703 unlink=True)
702 finally:
704 finally:
703 wlock.release()
705 wlock.release()
704
706
705 def getoutgoinglfiles(ui, repo, dest=None, **opts):
707 def getoutgoinglfiles(ui, repo, dest=None, **opts):
706 dest = ui.expandpath(dest or 'default-push', dest or 'default')
708 dest = ui.expandpath(dest or 'default-push', dest or 'default')
707 dest, branches = hg.parseurl(dest, opts.get('branch'))
709 dest, branches = hg.parseurl(dest, opts.get('branch'))
708 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
710 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
709 if revs:
711 if revs:
710 revs = [repo.lookup(rev) for rev in revs]
712 revs = [repo.lookup(rev) for rev in revs]
711
713
712 remoteui = hg.remoteui
714 remoteui = hg.remoteui
713
715
714 try:
716 try:
715 remote = hg.repository(remoteui(repo, opts), dest)
717 remote = hg.repository(remoteui(repo, opts), dest)
716 except error.RepoError:
718 except error.RepoError:
717 return None
719 return None
718 o = lfutil.findoutgoing(repo, remote, False)
720 o = lfutil.findoutgoing(repo, remote, False)
719 if not o:
721 if not o:
720 return None
722 return None
721 o = repo.changelog.nodesbetween(o, revs)[0]
723 o = repo.changelog.nodesbetween(o, revs)[0]
722 if opts.get('newest_first'):
724 if opts.get('newest_first'):
723 o.reverse()
725 o.reverse()
724
726
725 toupload = set()
727 toupload = set()
726 for n in o:
728 for n in o:
727 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
729 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
728 ctx = repo[n]
730 ctx = repo[n]
729 files = set(ctx.files())
731 files = set(ctx.files())
730 if len(parents) == 2:
732 if len(parents) == 2:
731 mc = ctx.manifest()
733 mc = ctx.manifest()
732 mp1 = ctx.parents()[0].manifest()
734 mp1 = ctx.parents()[0].manifest()
733 mp2 = ctx.parents()[1].manifest()
735 mp2 = ctx.parents()[1].manifest()
734 for f in mp1:
736 for f in mp1:
735 if f not in mc:
737 if f not in mc:
736 files.add(f)
738 files.add(f)
737 for f in mp2:
739 for f in mp2:
738 if f not in mc:
740 if f not in mc:
739 files.add(f)
741 files.add(f)
740 for f in mc:
742 for f in mc:
741 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
743 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
742 files.add(f)
744 files.add(f)
743 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
745 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
744 and f in ctx]))
746 and f in ctx]))
745 return toupload
747 return toupload
746
748
747 def override_outgoing(orig, ui, repo, dest=None, **opts):
749 def override_outgoing(orig, ui, repo, dest=None, **opts):
748 orig(ui, repo, dest, **opts)
750 orig(ui, repo, dest, **opts)
749
751
750 if opts.pop('large', None):
752 if opts.pop('large', None):
751 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
753 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
752 if toupload is None:
754 if toupload is None:
753 ui.status(_('largefiles: No remote repo\n'))
755 ui.status(_('largefiles: No remote repo\n'))
754 else:
756 else:
755 ui.status(_('largefiles to upload:\n'))
757 ui.status(_('largefiles to upload:\n'))
756 for file in toupload:
758 for file in toupload:
757 ui.status(lfutil.splitstandin(file) + '\n')
759 ui.status(lfutil.splitstandin(file) + '\n')
758 ui.status('\n')
760 ui.status('\n')
759
761
760 def override_summary(orig, ui, repo, *pats, **opts):
762 def override_summary(orig, ui, repo, *pats, **opts):
761 orig(ui, repo, *pats, **opts)
763 orig(ui, repo, *pats, **opts)
762
764
763 if opts.pop('large', None):
765 if opts.pop('large', None):
764 toupload = getoutgoinglfiles(ui, repo, None, **opts)
766 toupload = getoutgoinglfiles(ui, repo, None, **opts)
765 if toupload is None:
767 if toupload is None:
766 ui.status(_('largefiles: No remote repo\n'))
768 ui.status(_('largefiles: No remote repo\n'))
767 else:
769 else:
768 ui.status(_('largefiles: %d to upload\n') % len(toupload))
770 ui.status(_('largefiles: %d to upload\n') % len(toupload))
769
771
770 def override_addremove(orig, ui, repo, *pats, **opts):
772 def override_addremove(orig, ui, repo, *pats, **opts):
771 # Check if the parent or child has lfiles if they do don't allow it. If
773 # Check if the parent or child has lfiles if they do don't allow it. If
772 # there is a symlink in the manifest then getting the manifest throws an
774 # there is a symlink in the manifest then getting the manifest throws an
773 # exception catch it and let addremove deal with it. This happens in
775 # exception catch it and let addremove deal with it. This happens in
774 # Mercurial's test test-addremove-symlink
776 # Mercurial's test test-addremove-symlink
775 try:
777 try:
776 manifesttip = set(repo['tip'].manifest())
778 manifesttip = set(repo['tip'].manifest())
777 except util.Abort:
779 except util.Abort:
778 manifesttip = set()
780 manifesttip = set()
779 try:
781 try:
780 manifestworking = set(repo[None].manifest())
782 manifestworking = set(repo[None].manifest())
781 except util.Abort:
783 except util.Abort:
782 manifestworking = set()
784 manifestworking = set()
783
785
784 # Manifests are only iterable so turn them into sets then union
786 # Manifests are only iterable so turn them into sets then union
785 for file in manifesttip.union(manifestworking):
787 for file in manifesttip.union(manifestworking):
786 if file.startswith(lfutil.shortname):
788 if file.startswith(lfutil.shortname):
787 raise util.Abort(
789 raise util.Abort(
788 _('addremove cannot be run on a repo with largefiles'))
790 _('addremove cannot be run on a repo with largefiles'))
789
791
790 return orig(ui, repo, *pats, **opts)
792 return orig(ui, repo, *pats, **opts)
791
793
792 # Calling purge with --all will cause the lfiles to be deleted.
794 # Calling purge with --all will cause the lfiles to be deleted.
793 # Override repo.status to prevent this from happening.
795 # Override repo.status to prevent this from happening.
794 def override_purge(orig, ui, repo, *dirs, **opts):
796 def override_purge(orig, ui, repo, *dirs, **opts):
795 oldstatus = repo.status
797 oldstatus = repo.status
796 def override_status(node1='.', node2=None, match=None, ignored=False,
798 def override_status(node1='.', node2=None, match=None, ignored=False,
797 clean=False, unknown=False, listsubrepos=False):
799 clean=False, unknown=False, listsubrepos=False):
798 r = oldstatus(node1, node2, match, ignored, clean, unknown,
800 r = oldstatus(node1, node2, match, ignored, clean, unknown,
799 listsubrepos)
801 listsubrepos)
800 lfdirstate = lfutil.openlfdirstate(ui, repo)
802 lfdirstate = lfutil.openlfdirstate(ui, repo)
801 modified, added, removed, deleted, unknown, ignored, clean = r
803 modified, added, removed, deleted, unknown, ignored, clean = r
802 unknown = [f for f in unknown if lfdirstate[f] == '?']
804 unknown = [f for f in unknown if lfdirstate[f] == '?']
803 ignored = [f for f in ignored if lfdirstate[f] == '?']
805 ignored = [f for f in ignored if lfdirstate[f] == '?']
804 return modified, added, removed, deleted, unknown, ignored, clean
806 return modified, added, removed, deleted, unknown, ignored, clean
805 repo.status = override_status
807 repo.status = override_status
806 orig(ui, repo, *dirs, **opts)
808 orig(ui, repo, *dirs, **opts)
807 repo.status = oldstatus
809 repo.status = oldstatus
808
810
809 def override_rollback(orig, ui, repo, **opts):
811 def override_rollback(orig, ui, repo, **opts):
810 result = orig(ui, repo, **opts)
812 result = orig(ui, repo, **opts)
811 merge.update(repo, node=None, branchmerge=False, force=True,
813 merge.update(repo, node=None, branchmerge=False, force=True,
812 partial=lfutil.isstandin)
814 partial=lfutil.isstandin)
813 lfdirstate = lfutil.openlfdirstate(ui, repo)
815 lfdirstate = lfutil.openlfdirstate(ui, repo)
814 lfiles = lfutil.listlfiles(repo)
816 lfiles = lfutil.listlfiles(repo)
815 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
817 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
816 for file in lfiles:
818 for file in lfiles:
817 if file in oldlfiles:
819 if file in oldlfiles:
818 lfdirstate.normallookup(file)
820 lfdirstate.normallookup(file)
819 else:
821 else:
820 lfdirstate.add(file)
822 lfdirstate.add(file)
821 lfdirstate.write()
823 lfdirstate.write()
822 return result
824 return result
@@ -1,158 +1,160 b''
1 # Copyright 2011 Fog Creek Software
1 # Copyright 2011 Fog Creek Software
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 import os
6 import os
7 import tempfile
7 import tempfile
8 import urllib2
8 import urllib2
9
9
10 from mercurial import error, httprepo, util, wireproto
10 from mercurial import error, httprepo, util, wireproto
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 import lfutil
13 import lfutil
14
14
15 LARGEFILES_REQUIRED_MSG = '\nThis repository uses the largefiles extension.' \
15 LARGEFILES_REQUIRED_MSG = '\nThis repository uses the largefiles extension.' \
16 '\n\nPlease enable it in your Mercurial config ' \
16 '\n\nPlease enable it in your Mercurial config ' \
17 'file.\n'
17 'file.\n'
18
18
19 def putlfile(repo, proto, sha):
19 def putlfile(repo, proto, sha):
20 """putlfile puts a largefile into a repository's local cache and into the
20 '''Put a largefile into a repository's local cache and into the
21 system cache."""
21 system cache.'''
22 f = None
22 f = None
23 proto.redirect()
23 proto.redirect()
24 try:
24 try:
25 try:
25 try:
26 f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
26 f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
27 proto.getfile(f)
27 proto.getfile(f)
28 f.seek(0)
28 f.seek(0)
29 if sha != lfutil.hexsha1(f):
29 if sha != lfutil.hexsha1(f):
30 return wireproto.pushres(1)
30 return wireproto.pushres(1)
31 lfutil.copytocacheabsolute(repo, f.name, sha)
31 lfutil.copytocacheabsolute(repo, f.name, sha)
32 except IOError:
32 except IOError:
33 repo.ui.warn(
33 repo.ui.warn(
34 _('error: could not put received data into largefile store'))
34 _('error: could not put received data into largefile store'))
35 return wireproto.pushres(1)
35 return wireproto.pushres(1)
36 finally:
36 finally:
37 if f:
37 if f:
38 f.close()
38 f.close()
39
39
40 return wireproto.pushres(0)
40 return wireproto.pushres(0)
41
41
42 def getlfile(repo, proto, sha):
42 def getlfile(repo, proto, sha):
43 """getlfile retrieves a largefile from the repository-local cache or system
43 '''Retrieve a largefile from the repository-local cache or system
44 cache."""
44 cache.'''
45 filename = lfutil.findfile(repo, sha)
45 filename = lfutil.findfile(repo, sha)
46 if not filename:
46 if not filename:
47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
48 f = open(filename, 'rb')
48 f = open(filename, 'rb')
49 length = os.fstat(f.fileno())[6]
49 length = os.fstat(f.fileno())[6]
50 # since we can't set an HTTP content-length header here, and mercurial core
50
51 # provides no way to give the length of a streamres (and reading the entire
51 # Since we can't set an HTTP content-length header here, and
52 # file into RAM would be ill-advised), we just send the length on the first
52 # Mercurial core provides no way to give the length of a streamres
53 # line of the response, like the ssh proto does for string responses.
53 # (and reading the entire file into RAM would be ill-advised), we
54 # just send the length on the first line of the response, like the
55 # ssh proto does for string responses.
54 def generator():
56 def generator():
55 yield '%d\n' % length
57 yield '%d\n' % length
56 for chunk in f:
58 for chunk in f:
57 yield chunk
59 yield chunk
58 return wireproto.streamres(generator())
60 return wireproto.streamres(generator())
59
61
60 def statlfile(repo, proto, sha):
62 def statlfile(repo, proto, sha):
61 """statlfile sends '2\n' if the largefile is missing, '1\n' if it has a
63 '''Return '2\n' if the largefile is missing, '1\n' if it has a
62 mismatched checksum, or '0\n' if it is in good condition"""
64 mismatched checksum, or '0\n' if it is in good condition'''
63 filename = lfutil.findfile(repo, sha)
65 filename = lfutil.findfile(repo, sha)
64 if not filename:
66 if not filename:
65 return '2\n'
67 return '2\n'
66 fd = None
68 fd = None
67 try:
69 try:
68 fd = open(filename, 'rb')
70 fd = open(filename, 'rb')
69 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
71 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
70 finally:
72 finally:
71 if fd:
73 if fd:
72 fd.close()
74 fd.close()
73
75
74 def wirereposetup(ui, repo):
76 def wirereposetup(ui, repo):
75 class lfileswirerepository(repo.__class__):
77 class lfileswirerepository(repo.__class__):
76 def putlfile(self, sha, fd):
78 def putlfile(self, sha, fd):
77 # unfortunately, httprepository._callpush tries to convert its
79 # unfortunately, httprepository._callpush tries to convert its
78 # input file-like into a bundle before sending it, so we can't use
80 # input file-like into a bundle before sending it, so we can't use
79 # it ...
81 # it ...
80 if issubclass(self.__class__, httprepo.httprepository):
82 if issubclass(self.__class__, httprepo.httprepository):
81 try:
83 try:
82 return int(self._call('putlfile', data=fd, sha=sha,
84 return int(self._call('putlfile', data=fd, sha=sha,
83 headers={'content-type':'application/mercurial-0.1'}))
85 headers={'content-type':'application/mercurial-0.1'}))
84 except (ValueError, urllib2.HTTPError):
86 except (ValueError, urllib2.HTTPError):
85 return 1
87 return 1
86 # ... but we can't use sshrepository._call because the data=
88 # ... but we can't use sshrepository._call because the data=
87 # argument won't get sent, and _callpush does exactly what we want
89 # argument won't get sent, and _callpush does exactly what we want
88 # in this case: send the data straight through
90 # in this case: send the data straight through
89 else:
91 else:
90 try:
92 try:
91 ret, output = self._callpush("putlfile", fd, sha=sha)
93 ret, output = self._callpush("putlfile", fd, sha=sha)
92 if ret == "":
94 if ret == "":
93 raise error.ResponseError(_('putlfile failed:'),
95 raise error.ResponseError(_('putlfile failed:'),
94 output)
96 output)
95 return int(ret)
97 return int(ret)
96 except IOError:
98 except IOError:
97 return 1
99 return 1
98 except ValueError:
100 except ValueError:
99 raise error.ResponseError(
101 raise error.ResponseError(
100 _('putlfile failed (unexpected response):'), ret)
102 _('putlfile failed (unexpected response):'), ret)
101
103
102 def getlfile(self, sha):
104 def getlfile(self, sha):
103 stream = self._callstream("getlfile", sha=sha)
105 stream = self._callstream("getlfile", sha=sha)
104 length = stream.readline()
106 length = stream.readline()
105 try:
107 try:
106 length = int(length)
108 length = int(length)
107 except ValueError:
109 except ValueError:
108 self._abort(error.ResponseError(_("unexpected response:"),
110 self._abort(error.ResponseError(_("unexpected response:"),
109 length))
111 length))
110 return (length, stream)
112 return (length, stream)
111
113
112 def statlfile(self, sha):
114 def statlfile(self, sha):
113 try:
115 try:
114 return int(self._call("statlfile", sha=sha))
116 return int(self._call("statlfile", sha=sha))
115 except (ValueError, urllib2.HTTPError):
117 except (ValueError, urllib2.HTTPError):
116 # if the server returns anything but an integer followed by a
118 # If the server returns anything but an integer followed by a
117 # newline, newline, it's not speaking our language; if we get
119 # newline, newline, it's not speaking our language; if we get
118 # an HTTP error, we can't be sure the largefile is present;
120 # an HTTP error, we can't be sure the largefile is present;
119 # either way, consider it missing
121 # either way, consider it missing.
120 return 2
122 return 2
121
123
122 repo.__class__ = lfileswirerepository
124 repo.__class__ = lfileswirerepository
123
125
124 # advertise the largefiles=serve capability
126 # advertise the largefiles=serve capability
125 def capabilities(repo, proto):
127 def capabilities(repo, proto):
126 return capabilities_orig(repo, proto) + ' largefiles=serve'
128 return capabilities_orig(repo, proto) + ' largefiles=serve'
127
129
128 # duplicate what Mercurial's new out-of-band errors mechanism does, because
130 # duplicate what Mercurial's new out-of-band errors mechanism does, because
129 # clients old and new alike both handle it well
131 # clients old and new alike both handle it well
130 def webproto_refuseclient(self, message):
132 def webproto_refuseclient(self, message):
131 self.req.header([('Content-Type', 'application/hg-error')])
133 self.req.header([('Content-Type', 'application/hg-error')])
132 return message
134 return message
133
135
134 def sshproto_refuseclient(self, message):
136 def sshproto_refuseclient(self, message):
135 self.ui.write_err('%s\n-\n' % message)
137 self.ui.write_err('%s\n-\n' % message)
136 self.fout.write('\n')
138 self.fout.write('\n')
137 self.fout.flush()
139 self.fout.flush()
138
140
139 return ''
141 return ''
140
142
141 def heads(repo, proto):
143 def heads(repo, proto):
142 if lfutil.islfilesrepo(repo):
144 if lfutil.islfilesrepo(repo):
143 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
145 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
144 return wireproto.heads(repo, proto)
146 return wireproto.heads(repo, proto)
145
147
146 def sshrepo_callstream(self, cmd, **args):
148 def sshrepo_callstream(self, cmd, **args):
147 if cmd == 'heads' and self.capable('largefiles'):
149 if cmd == 'heads' and self.capable('largefiles'):
148 cmd = 'lheads'
150 cmd = 'lheads'
149 if cmd == 'batch' and self.capable('largefiles'):
151 if cmd == 'batch' and self.capable('largefiles'):
150 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
152 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
151 return ssh_oldcallstream(self, cmd, **args)
153 return ssh_oldcallstream(self, cmd, **args)
152
154
153 def httprepo_callstream(self, cmd, **args):
155 def httprepo_callstream(self, cmd, **args):
154 if cmd == 'heads' and self.capable('largefiles'):
156 if cmd == 'heads' and self.capable('largefiles'):
155 cmd = 'lheads'
157 cmd = 'lheads'
156 if cmd == 'batch' and self.capable('largefiles'):
158 if cmd == 'batch' and self.capable('largefiles'):
157 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
159 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
158 return http_oldcallstream(self, cmd, **args)
160 return http_oldcallstream(self, cmd, **args)
@@ -1,105 +1,105 b''
1 # Copyright 2010-2011 Fog Creek Software
1 # Copyright 2010-2011 Fog Creek Software
2 # Copyright 2010-2011 Unity Technologies
2 # Copyright 2010-2011 Unity Technologies
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 '''Remote largefile store; the base class for servestore'''
7 '''remote largefile store; the base class for servestore'''
8
8
9 import urllib2
9 import urllib2
10
10
11 from mercurial import util
11 from mercurial import util
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13
13
14 import lfutil
14 import lfutil
15 import basestore
15 import basestore
16
16
17 class remotestore(basestore.basestore):
17 class remotestore(basestore.basestore):
18 """A largefile store accessed over a network"""
18 '''a largefile store accessed over a network'''
19 def __init__(self, ui, repo, url):
19 def __init__(self, ui, repo, url):
20 super(remotestore, self).__init__(ui, repo, url)
20 super(remotestore, self).__init__(ui, repo, url)
21
21
22 def put(self, source, hash):
22 def put(self, source, hash):
23 if self._verify(hash):
23 if self._verify(hash):
24 return
24 return
25 if self.sendfile(source, hash):
25 if self.sendfile(source, hash):
26 raise util.Abort(
26 raise util.Abort(
27 _('remotestore: could not put %s to remote store %s')
27 _('remotestore: could not put %s to remote store %s')
28 % (source, self.url))
28 % (source, self.url))
29 self.ui.debug(
29 self.ui.debug(
30 _('remotestore: put %s to remote store %s') % (source, self.url))
30 _('remotestore: put %s to remote store %s') % (source, self.url))
31
31
32 def exists(self, hash):
32 def exists(self, hash):
33 return self._verify(hash)
33 return self._verify(hash)
34
34
35 def sendfile(self, filename, hash):
35 def sendfile(self, filename, hash):
36 self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
36 self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
37 fd = None
37 fd = None
38 try:
38 try:
39 try:
39 try:
40 fd = lfutil.httpsendfile(self.ui, filename)
40 fd = lfutil.httpsendfile(self.ui, filename)
41 except IOError, e:
41 except IOError, e:
42 raise util.Abort(
42 raise util.Abort(
43 _('remotestore: could not open file %s: %s')
43 _('remotestore: could not open file %s: %s')
44 % (filename, str(e)))
44 % (filename, str(e)))
45 return self._put(hash, fd)
45 return self._put(hash, fd)
46 finally:
46 finally:
47 if fd:
47 if fd:
48 fd.close()
48 fd.close()
49
49
50 def _getfile(self, tmpfile, filename, hash):
50 def _getfile(self, tmpfile, filename, hash):
51 # quit if the largefile isn't there
51 # quit if the largefile isn't there
52 stat = self._stat(hash)
52 stat = self._stat(hash)
53 if stat:
53 if stat:
54 raise util.Abort(_('remotestore: largefile %s is %s') %
54 raise util.Abort(_('remotestore: largefile %s is %s') %
55 (hash, stat == 1 and 'invalid' or 'missing'))
55 (hash, stat == 1 and 'invalid' or 'missing'))
56
56
57 try:
57 try:
58 length, infile = self._get(hash)
58 length, infile = self._get(hash)
59 except urllib2.HTTPError, e:
59 except urllib2.HTTPError, e:
60 # 401s get converted to util.Aborts; everything else is fine being
60 # 401s get converted to util.Aborts; everything else is fine being
61 # turned into a StoreError
61 # turned into a StoreError
62 raise basestore.StoreError(filename, hash, self.url, str(e))
62 raise basestore.StoreError(filename, hash, self.url, str(e))
63 except urllib2.URLError, e:
63 except urllib2.URLError, e:
64 # This usually indicates a connection problem, so don't
64 # This usually indicates a connection problem, so don't
65 # keep trying with the other files... they will probably
65 # keep trying with the other files... they will probably
66 # all fail too.
66 # all fail too.
67 raise util.Abort('%s: %s' % (self.url, str(e.reason)))
67 raise util.Abort('%s: %s' % (self.url, str(e.reason)))
68 except IOError, e:
68 except IOError, e:
69 raise basestore.StoreError(filename, hash, self.url, str(e))
69 raise basestore.StoreError(filename, hash, self.url, str(e))
70
70
71 # Mercurial does not close its SSH connections after writing a stream
71 # Mercurial does not close its SSH connections after writing a stream
72 if length is not None:
72 if length is not None:
73 infile = lfutil.limitreader(infile, length)
73 infile = lfutil.limitreader(infile, length)
74 return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
74 return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
75
75
76 def _verify(self, hash):
76 def _verify(self, hash):
77 return not self._stat(hash)
77 return not self._stat(hash)
78
78
79 def _verifyfile(self, cctx, cset, contents, standin, verified):
79 def _verifyfile(self, cctx, cset, contents, standin, verified):
80 filename = lfutil.splitstandin(standin)
80 filename = lfutil.splitstandin(standin)
81 if not filename:
81 if not filename:
82 return False
82 return False
83 fctx = cctx[standin]
83 fctx = cctx[standin]
84 key = (filename, fctx.filenode())
84 key = (filename, fctx.filenode())
85 if key in verified:
85 if key in verified:
86 return False
86 return False
87
87
88 verified.add(key)
88 verified.add(key)
89
89
90 stat = self._stat(hash)
90 stat = self._stat(hash)
91 if not stat:
91 if not stat:
92 return False
92 return False
93 elif stat == 1:
93 elif stat == 1:
94 self.ui.warn(
94 self.ui.warn(
95 _('changeset %s: %s: contents differ\n')
95 _('changeset %s: %s: contents differ\n')
96 % (cset, filename))
96 % (cset, filename))
97 return True # failed
97 return True # failed
98 elif stat == 2:
98 elif stat == 2:
99 self.ui.warn(
99 self.ui.warn(
100 _('changeset %s: %s missing\n')
100 _('changeset %s: %s missing\n')
101 % (cset, filename))
101 % (cset, filename))
102 return True # failed
102 return True # failed
103 else:
103 else:
104 raise util.Abort(_('check failed, unexpected response'
104 raise util.Abort(_('check failed, unexpected response'
105 'statlfile: %d') % stat)
105 'statlfile: %d') % stat)
@@ -1,410 +1,411 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13 import re
13 import re
14
14
15 from mercurial import context, error, manifest, match as match_, \
15 from mercurial import context, error, manifest, match as match_, \
16 node, util
16 node, util
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 import lfcommands
19 import lfcommands
20 import proto
20 import proto
21 import lfutil
21 import lfutil
22
22
23 def reposetup(ui, repo):
23 def reposetup(ui, repo):
24 # wire repositories should be given new wireproto functions but not the
24 # wire repositories should be given new wireproto functions but not the
25 # other largefiles modifications
25 # other largefiles modifications
26 if not repo.local():
26 if not repo.local():
27 return proto.wirereposetup(ui, repo)
27 return proto.wirereposetup(ui, repo)
28
28
29 for name in ('status', 'commitctx', 'commit', 'push'):
29 for name in ('status', 'commitctx', 'commit', 'push'):
30 method = getattr(repo, name)
30 method = getattr(repo, name)
31 #if not (isinstance(method, types.MethodType) and
31 #if not (isinstance(method, types.MethodType) and
32 # method.im_func is repo.__class__.commitctx.im_func):
32 # method.im_func is repo.__class__.commitctx.im_func):
33 if isinstance(method, types.FunctionType) and method.func_name == \
33 if isinstance(method, types.FunctionType) and method.func_name == \
34 'wrap':
34 'wrap':
35 ui.warn(_('largefiles: repo method %r appears to have already been'
35 ui.warn(_('largefiles: repo method %r appears to have already been'
36 ' wrapped by another extension: '
36 ' wrapped by another extension: '
37 'largefiles may behave incorrectly\n')
37 'largefiles may behave incorrectly\n')
38 % name)
38 % name)
39
39
40 class lfiles_repo(repo.__class__):
40 class lfiles_repo(repo.__class__):
41 lfstatus = False
41 lfstatus = False
42 def status_nolfiles(self, *args, **kwargs):
42 def status_nolfiles(self, *args, **kwargs):
43 return super(lfiles_repo, self).status(*args, **kwargs)
43 return super(lfiles_repo, self).status(*args, **kwargs)
44
44
45 # When lfstatus is set, return a context that gives the names of lfiles
45 # When lfstatus is set, return a context that gives the names
46 # instead of their corresponding standins and identifies the lfiles as
46 # of largefiles instead of their corresponding standins and
47 # always binary, regardless of their actual contents.
47 # identifies the largefiles as always binary, regardless of
48 # their actual contents.
48 def __getitem__(self, changeid):
49 def __getitem__(self, changeid):
49 ctx = super(lfiles_repo, self).__getitem__(changeid)
50 ctx = super(lfiles_repo, self).__getitem__(changeid)
50 if self.lfstatus:
51 if self.lfstatus:
51 class lfiles_manifestdict(manifest.manifestdict):
52 class lfiles_manifestdict(manifest.manifestdict):
52 def __contains__(self, filename):
53 def __contains__(self, filename):
53 if super(lfiles_manifestdict,
54 if super(lfiles_manifestdict,
54 self).__contains__(filename):
55 self).__contains__(filename):
55 return True
56 return True
56 return super(lfiles_manifestdict,
57 return super(lfiles_manifestdict,
57 self).__contains__(lfutil.shortname+'/' + filename)
58 self).__contains__(lfutil.shortname+'/' + filename)
58 class lfiles_ctx(ctx.__class__):
59 class lfiles_ctx(ctx.__class__):
59 def files(self):
60 def files(self):
60 filenames = super(lfiles_ctx, self).files()
61 filenames = super(lfiles_ctx, self).files()
61 return [re.sub('^\\'+lfutil.shortname+'/', '',
62 return [re.sub('^\\'+lfutil.shortname+'/', '',
62 filename) for filename in filenames]
63 filename) for filename in filenames]
63 def manifest(self):
64 def manifest(self):
64 man1 = super(lfiles_ctx, self).manifest()
65 man1 = super(lfiles_ctx, self).manifest()
65 man1.__class__ = lfiles_manifestdict
66 man1.__class__ = lfiles_manifestdict
66 return man1
67 return man1
67 def filectx(self, path, fileid=None, filelog=None):
68 def filectx(self, path, fileid=None, filelog=None):
68 try:
69 try:
69 result = super(lfiles_ctx, self).filectx(path,
70 result = super(lfiles_ctx, self).filectx(path,
70 fileid, filelog)
71 fileid, filelog)
71 except error.LookupError:
72 except error.LookupError:
72 # Adding a null character will cause Mercurial to
73 # Adding a null character will cause Mercurial to
73 # identify this as a binary file.
74 # identify this as a binary file.
74 result = super(lfiles_ctx, self).filectx(
75 result = super(lfiles_ctx, self).filectx(
75 lfutil.shortname + '/' + path, fileid,
76 lfutil.shortname + '/' + path, fileid,
76 filelog)
77 filelog)
77 olddata = result.data
78 olddata = result.data
78 result.data = lambda: olddata() + '\0'
79 result.data = lambda: olddata() + '\0'
79 return result
80 return result
80 ctx.__class__ = lfiles_ctx
81 ctx.__class__ = lfiles_ctx
81 return ctx
82 return ctx
82
83
83 # Figure out the status of big files and insert them into the
84 # Figure out the status of big files and insert them into the
84 # appropriate list in the result. Also removes standin files from
85 # appropriate list in the result. Also removes standin files
85 # the listing. This function reverts to the original status if
86 # from the listing. Revert to the original status if
86 # self.lfstatus is False
87 # self.lfstatus is False.
87 def status(self, node1='.', node2=None, match=None, ignored=False,
88 def status(self, node1='.', node2=None, match=None, ignored=False,
88 clean=False, unknown=False, listsubrepos=False):
89 clean=False, unknown=False, listsubrepos=False):
89 listignored, listclean, listunknown = ignored, clean, unknown
90 listignored, listclean, listunknown = ignored, clean, unknown
90 if not self.lfstatus:
91 if not self.lfstatus:
91 try:
92 try:
92 return super(lfiles_repo, self).status(node1, node2, match,
93 return super(lfiles_repo, self).status(node1, node2, match,
93 listignored, listclean, listunknown, listsubrepos)
94 listignored, listclean, listunknown, listsubrepos)
94 except TypeError:
95 except TypeError:
95 return super(lfiles_repo, self).status(node1, node2, match,
96 return super(lfiles_repo, self).status(node1, node2, match,
96 listignored, listclean, listunknown)
97 listignored, listclean, listunknown)
97 else:
98 else:
98 # some calls in this function rely on the old version of status
99 # some calls in this function rely on the old version of status
99 self.lfstatus = False
100 self.lfstatus = False
100 if isinstance(node1, context.changectx):
101 if isinstance(node1, context.changectx):
101 ctx1 = node1
102 ctx1 = node1
102 else:
103 else:
103 ctx1 = repo[node1]
104 ctx1 = repo[node1]
104 if isinstance(node2, context.changectx):
105 if isinstance(node2, context.changectx):
105 ctx2 = node2
106 ctx2 = node2
106 else:
107 else:
107 ctx2 = repo[node2]
108 ctx2 = repo[node2]
108 working = ctx2.rev() is None
109 working = ctx2.rev() is None
109 parentworking = working and ctx1 == self['.']
110 parentworking = working and ctx1 == self['.']
110
111
111 def inctx(file, ctx):
112 def inctx(file, ctx):
112 try:
113 try:
113 if ctx.rev() is None:
114 if ctx.rev() is None:
114 return file in ctx.manifest()
115 return file in ctx.manifest()
115 ctx[file]
116 ctx[file]
116 return True
117 return True
117 except KeyError:
118 except KeyError:
118 return False
119 return False
119
120
120 # create a copy of match that matches standins instead of
121 # create a copy of match that matches standins instead of
121 # lfiles if matcher not set then it is the always matcher so
122 # lfiles if matcher not set then it is the always matcher so
122 # overwrite that
123 # overwrite that
123 if match is None:
124 if match is None:
124 match = match_.always(self.root, self.getcwd())
125 match = match_.always(self.root, self.getcwd())
125
126
126 def tostandin(file):
127 def tostandin(file):
127 if inctx(lfutil.standin(file), ctx2):
128 if inctx(lfutil.standin(file), ctx2):
128 return lfutil.standin(file)
129 return lfutil.standin(file)
129 return file
130 return file
130
131
131 m = copy.copy(match)
132 m = copy.copy(match)
132 m._files = [tostandin(f) for f in m._files]
133 m._files = [tostandin(f) for f in m._files]
133
134
134 # get ignored clean and unknown but remove them later if they
135 # get ignored, clean, and unknown but remove them
135 # were not asked for
136 # later if they were not asked for
136 try:
137 try:
137 result = super(lfiles_repo, self).status(node1, node2, m,
138 result = super(lfiles_repo, self).status(node1, node2, m,
138 True, True, True, listsubrepos)
139 True, True, True, listsubrepos)
139 except TypeError:
140 except TypeError:
140 result = super(lfiles_repo, self).status(node1, node2, m,
141 result = super(lfiles_repo, self).status(node1, node2, m,
141 True, True, True)
142 True, True, True)
142 if working:
143 if working:
143 # Hold the wlock while we read lfiles and update the
144 # hold the wlock while we read largefiles and
144 # lfdirstate
145 # update the lfdirstate
145 wlock = repo.wlock()
146 wlock = repo.wlock()
146 try:
147 try:
147 # Any non lfiles that were explicitly listed must be
148 # Any non-largefiles that were explicitly listed must be
148 # taken out or lfdirstate.status will report an error.
149 # taken out or lfdirstate.status will report an error.
149 # The status of these files was already computed using
150 # The status of these files was already computed using
150 # super's status.
151 # super's status.
151 lfdirstate = lfutil.openlfdirstate(ui, self)
152 lfdirstate = lfutil.openlfdirstate(ui, self)
152 match._files = [f for f in match._files if f in
153 match._files = [f for f in match._files if f in
153 lfdirstate]
154 lfdirstate]
154 s = lfdirstate.status(match, [], listignored,
155 s = lfdirstate.status(match, [], listignored,
155 listclean, listunknown)
156 listclean, listunknown)
156 (unsure, modified, added, removed, missing, unknown,
157 (unsure, modified, added, removed, missing, unknown,
157 ignored, clean) = s
158 ignored, clean) = s
158 if parentworking:
159 if parentworking:
159 for lfile in unsure:
160 for lfile in unsure:
160 if ctx1[lfutil.standin(lfile)].data().strip() \
161 if ctx1[lfutil.standin(lfile)].data().strip() \
161 != lfutil.hashfile(self.wjoin(lfile)):
162 != lfutil.hashfile(self.wjoin(lfile)):
162 modified.append(lfile)
163 modified.append(lfile)
163 else:
164 else:
164 clean.append(lfile)
165 clean.append(lfile)
165 lfdirstate.normal(lfile)
166 lfdirstate.normal(lfile)
166 lfdirstate.write()
167 lfdirstate.write()
167 else:
168 else:
168 tocheck = unsure + modified + added + clean
169 tocheck = unsure + modified + added + clean
169 modified, added, clean = [], [], []
170 modified, added, clean = [], [], []
170
171
171 for lfile in tocheck:
172 for lfile in tocheck:
172 standin = lfutil.standin(lfile)
173 standin = lfutil.standin(lfile)
173 if inctx(standin, ctx1):
174 if inctx(standin, ctx1):
174 if ctx1[standin].data().strip() != \
175 if ctx1[standin].data().strip() != \
175 lfutil.hashfile(self.wjoin(lfile)):
176 lfutil.hashfile(self.wjoin(lfile)):
176 modified.append(lfile)
177 modified.append(lfile)
177 else:
178 else:
178 clean.append(lfile)
179 clean.append(lfile)
179 else:
180 else:
180 added.append(lfile)
181 added.append(lfile)
181 finally:
182 finally:
182 wlock.release()
183 wlock.release()
183
184
184 for standin in ctx1.manifest():
185 for standin in ctx1.manifest():
185 if not lfutil.isstandin(standin):
186 if not lfutil.isstandin(standin):
186 continue
187 continue
187 lfile = lfutil.splitstandin(standin)
188 lfile = lfutil.splitstandin(standin)
188 if not match(lfile):
189 if not match(lfile):
189 continue
190 continue
190 if lfile not in lfdirstate:
191 if lfile not in lfdirstate:
191 removed.append(lfile)
192 removed.append(lfile)
192 # Handle unknown and ignored differently
193 # Handle unknown and ignored differently
193 lfiles = (modified, added, removed, missing, [], [], clean)
194 lfiles = (modified, added, removed, missing, [], [], clean)
194 result = list(result)
195 result = list(result)
195 # Unknown files
196 # Unknown files
196 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
197 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
197 and not lfutil.isstandin(f)]
198 and not lfutil.isstandin(f)]
198 # Ignored files must be ignored by both the dirstate and
199 # Ignored files must be ignored by both the dirstate and
199 # lfdirstate
200 # lfdirstate
200 result[5] = set(ignored).intersection(set(result[5]))
201 result[5] = set(ignored).intersection(set(result[5]))
201 # combine normal files and lfiles
202 # combine normal files and lfiles
202 normals = [[fn for fn in filelist if not \
203 normals = [[fn for fn in filelist if not \
203 lfutil.isstandin(fn)] for filelist in result]
204 lfutil.isstandin(fn)] for filelist in result]
204 result = [sorted(list1 + list2) for (list1, list2) in \
205 result = [sorted(list1 + list2) for (list1, list2) in \
205 zip(normals, lfiles)]
206 zip(normals, lfiles)]
206 else:
207 else:
207 def toname(f):
208 def toname(f):
208 if lfutil.isstandin(f):
209 if lfutil.isstandin(f):
209 return lfutil.splitstandin(f)
210 return lfutil.splitstandin(f)
210 return f
211 return f
211 result = [[toname(f) for f in items] for items in result]
212 result = [[toname(f) for f in items] for items in result]
212
213
213 if not listunknown:
214 if not listunknown:
214 result[4] = []
215 result[4] = []
215 if not listignored:
216 if not listignored:
216 result[5] = []
217 result[5] = []
217 if not listclean:
218 if not listclean:
218 result[6] = []
219 result[6] = []
219 self.lfstatus = True
220 self.lfstatus = True
220 return result
221 return result
221
222
222 # This call happens after a commit has occurred. Copy all of the lfiles
223 # This call happens after a commit has occurred. Copy all of the lfiles
223 # into the cache
224 # into the cache
224 def commitctx(self, *args, **kwargs):
225 def commitctx(self, *args, **kwargs):
225 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
226 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
226 ctx = self[node]
227 ctx = self[node]
227 for filename in ctx.files():
228 for filename in ctx.files():
228 if lfutil.isstandin(filename) and filename in ctx.manifest():
229 if lfutil.isstandin(filename) and filename in ctx.manifest():
229 realfile = lfutil.splitstandin(filename)
230 realfile = lfutil.splitstandin(filename)
230 lfutil.copytocache(self, ctx.node(), realfile)
231 lfutil.copytocache(self, ctx.node(), realfile)
231
232
232 return node
233 return node
233
234
234 # This call happens before a commit has occurred. The lfile standins
235 # This call happens before a commit has occurred. The lfile standins
235 # have not had their contents updated (to reflect the hash of their
236 # have not had their contents updated (to reflect the hash of their
236 # lfile). Do that here.
237 # lfile). Do that here.
237 def commit(self, text="", user=None, date=None, match=None,
238 def commit(self, text="", user=None, date=None, match=None,
238 force=False, editor=False, extra={}):
239 force=False, editor=False, extra={}):
239 orig = super(lfiles_repo, self).commit
240 orig = super(lfiles_repo, self).commit
240
241
241 wlock = repo.wlock()
242 wlock = repo.wlock()
242 try:
243 try:
243 if getattr(repo, "_isrebasing", False):
244 if getattr(repo, "_isrebasing", False):
244 # We have to take the time to pull down the new lfiles now.
245 # We have to take the time to pull down the new lfiles now.
245 # Otherwise if we are rebasing, any lfiles that were
246 # Otherwise if we are rebasing, any lfiles that were
246 # modified in the changesets we are rebasing on top of get
247 # modified in the changesets we are rebasing on top of get
247 # overwritten either by the rebase or in the first commit
248 # overwritten either by the rebase or in the first commit
248 # after the rebase.
249 # after the rebase.
249 lfcommands.updatelfiles(repo.ui, repo)
250 lfcommands.updatelfiles(repo.ui, repo)
250 # Case 1: user calls commit with no specific files or
251 # Case 1: user calls commit with no specific files or
251 # include/exclude patterns: refresh and commit all files that
252 # include/exclude patterns: refresh and commit all files that
252 # are "dirty".
253 # are "dirty".
253 if (match is None) or (not match.anypats() and not \
254 if (match is None) or (not match.anypats() and not \
254 match.files()):
255 match.files()):
255 # Spend a bit of time here to get a list of files we know
256 # Spend a bit of time here to get a list of files we know
256 # are modified so we can compare only against those.
257 # are modified so we can compare only against those.
257 # It can cost a lot of time (several seconds)
258 # It can cost a lot of time (several seconds)
258 # otherwise to update all standins if the largefiles are
259 # otherwise to update all standins if the largefiles are
259 # large.
260 # large.
260 lfdirstate = lfutil.openlfdirstate(ui, self)
261 lfdirstate = lfutil.openlfdirstate(ui, self)
261 dirtymatch = match_.always(repo.root, repo.getcwd())
262 dirtymatch = match_.always(repo.root, repo.getcwd())
262 s = lfdirstate.status(dirtymatch, [], False, False, False)
263 s = lfdirstate.status(dirtymatch, [], False, False, False)
263 modifiedfiles = []
264 modifiedfiles = []
264 for i in s:
265 for i in s:
265 modifiedfiles.extend(i)
266 modifiedfiles.extend(i)
266 lfiles = lfutil.listlfiles(self)
267 lfiles = lfutil.listlfiles(self)
267 # this only loops through lfiles that exist (not
268 # this only loops through lfiles that exist (not
268 # removed/renamed)
269 # removed/renamed)
269 for lfile in lfiles:
270 for lfile in lfiles:
270 if lfile in modifiedfiles:
271 if lfile in modifiedfiles:
271 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
272 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
272 # this handles the case where a rebase is being
273 # this handles the case where a rebase is being
273 # performed and the working copy is not updated
274 # performed and the working copy is not updated
274 # yet.
275 # yet.
275 if os.path.exists(self.wjoin(lfile)):
276 if os.path.exists(self.wjoin(lfile)):
276 lfutil.updatestandin(self,
277 lfutil.updatestandin(self,
277 lfutil.standin(lfile))
278 lfutil.standin(lfile))
278 lfdirstate.normal(lfile)
279 lfdirstate.normal(lfile)
279 for lfile in lfdirstate:
280 for lfile in lfdirstate:
280 if lfile in modifiedfiles:
281 if lfile in modifiedfiles:
281 if not os.path.exists(
282 if not os.path.exists(
282 repo.wjoin(lfutil.standin(lfile))):
283 repo.wjoin(lfutil.standin(lfile))):
283 lfdirstate.drop(lfile)
284 lfdirstate.drop(lfile)
284 lfdirstate.write()
285 lfdirstate.write()
285
286
286 return orig(text=text, user=user, date=date, match=match,
287 return orig(text=text, user=user, date=date, match=match,
287 force=force, editor=editor, extra=extra)
288 force=force, editor=editor, extra=extra)
288
289
289 for file in match.files():
290 for file in match.files():
290 if lfutil.isstandin(file):
291 if lfutil.isstandin(file):
291 raise util.Abort(
292 raise util.Abort(
292 "Don't commit largefile standin. Commit largefile.")
293 "Don't commit largefile standin. Commit largefile.")
293
294
294 # Case 2: user calls commit with specified patterns: refresh
295 # Case 2: user calls commit with specified patterns: refresh
295 # any matching big files.
296 # any matching big files.
296 smatcher = lfutil.composestandinmatcher(self, match)
297 smatcher = lfutil.composestandinmatcher(self, match)
297 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
298 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
298
299
299 # No matching big files: get out of the way and pass control to
300 # No matching big files: get out of the way and pass control to
300 # the usual commit() method.
301 # the usual commit() method.
301 if not standins:
302 if not standins:
302 return orig(text=text, user=user, date=date, match=match,
303 return orig(text=text, user=user, date=date, match=match,
303 force=force, editor=editor, extra=extra)
304 force=force, editor=editor, extra=extra)
304
305
305 # Refresh all matching big files. It's possible that the
306 # Refresh all matching big files. It's possible that the
306 # commit will end up failing, in which case the big files will
307 # commit will end up failing, in which case the big files will
307 # stay refreshed. No harm done: the user modified them and
308 # stay refreshed. No harm done: the user modified them and
308 # asked to commit them, so sooner or later we're going to
309 # asked to commit them, so sooner or later we're going to
309 # refresh the standins. Might as well leave them refreshed.
310 # refresh the standins. Might as well leave them refreshed.
310 lfdirstate = lfutil.openlfdirstate(ui, self)
311 lfdirstate = lfutil.openlfdirstate(ui, self)
311 for standin in standins:
312 for standin in standins:
312 lfile = lfutil.splitstandin(standin)
313 lfile = lfutil.splitstandin(standin)
313 if lfdirstate[lfile] <> 'r':
314 if lfdirstate[lfile] <> 'r':
314 lfutil.updatestandin(self, standin)
315 lfutil.updatestandin(self, standin)
315 lfdirstate.normal(lfile)
316 lfdirstate.normal(lfile)
316 else:
317 else:
317 lfdirstate.drop(lfile)
318 lfdirstate.drop(lfile)
318 lfdirstate.write()
319 lfdirstate.write()
319
320
320 # Cook up a new matcher that only matches regular files or
321 # Cook up a new matcher that only matches regular files or
321 # standins corresponding to the big files requested by the
322 # standins corresponding to the big files requested by the
322 # user. Have to modify _files to prevent commit() from
323 # user. Have to modify _files to prevent commit() from
323 # complaining "not tracked" for big files.
324 # complaining "not tracked" for big files.
324 lfiles = lfutil.listlfiles(repo)
325 lfiles = lfutil.listlfiles(repo)
325 match = copy.copy(match)
326 match = copy.copy(match)
326 orig_matchfn = match.matchfn
327 orig_matchfn = match.matchfn
327
328
328 # Check both the list of lfiles and the list of standins
329 # Check both the list of lfiles and the list of standins
329 # because if a lfile was removed, it won't be in the list of
330 # because if a lfile was removed, it won't be in the list of
330 # lfiles at this point
331 # lfiles at this point
331 match._files += sorted(standins)
332 match._files += sorted(standins)
332
333
333 actualfiles = []
334 actualfiles = []
334 for f in match._files:
335 for f in match._files:
335 fstandin = lfutil.standin(f)
336 fstandin = lfutil.standin(f)
336
337
337 # Ignore known lfiles and standins
338 # ignore known largefiles and standins
338 if f in lfiles or fstandin in standins:
339 if f in lfiles or fstandin in standins:
339 continue
340 continue
340
341
341 # Append directory separator to avoid collisions
342 # append directory separator to avoid collisions
342 if not fstandin.endswith(os.sep):
343 if not fstandin.endswith(os.sep):
343 fstandin += os.sep
344 fstandin += os.sep
344
345
345 # Prevalidate matching standin directories
346 # prevalidate matching standin directories
346 if lfutil.any_(st for st in match._files if \
347 if lfutil.any_(st for st in match._files if \
347 st.startswith(fstandin)):
348 st.startswith(fstandin)):
348 continue
349 continue
349 actualfiles.append(f)
350 actualfiles.append(f)
350 match._files = actualfiles
351 match._files = actualfiles
351
352
352 def matchfn(f):
353 def matchfn(f):
353 if orig_matchfn(f):
354 if orig_matchfn(f):
354 return f not in lfiles
355 return f not in lfiles
355 else:
356 else:
356 return f in standins
357 return f in standins
357
358
358 match.matchfn = matchfn
359 match.matchfn = matchfn
359 return orig(text=text, user=user, date=date, match=match,
360 return orig(text=text, user=user, date=date, match=match,
360 force=force, editor=editor, extra=extra)
361 force=force, editor=editor, extra=extra)
361 finally:
362 finally:
362 wlock.release()
363 wlock.release()
363
364
364 def push(self, remote, force=False, revs=None, newbranch=False):
365 def push(self, remote, force=False, revs=None, newbranch=False):
365 o = lfutil.findoutgoing(repo, remote, force)
366 o = lfutil.findoutgoing(repo, remote, force)
366 if o:
367 if o:
367 toupload = set()
368 toupload = set()
368 o = repo.changelog.nodesbetween(o, revs)[0]
369 o = repo.changelog.nodesbetween(o, revs)[0]
369 for n in o:
370 for n in o:
370 parents = [p for p in repo.changelog.parents(n) if p != \
371 parents = [p for p in repo.changelog.parents(n) if p != \
371 node.nullid]
372 node.nullid]
372 ctx = repo[n]
373 ctx = repo[n]
373 files = set(ctx.files())
374 files = set(ctx.files())
374 if len(parents) == 2:
375 if len(parents) == 2:
375 mc = ctx.manifest()
376 mc = ctx.manifest()
376 mp1 = ctx.parents()[0].manifest()
377 mp1 = ctx.parents()[0].manifest()
377 mp2 = ctx.parents()[1].manifest()
378 mp2 = ctx.parents()[1].manifest()
378 for f in mp1:
379 for f in mp1:
379 if f not in mc:
380 if f not in mc:
380 files.add(f)
381 files.add(f)
381 for f in mp2:
382 for f in mp2:
382 if f not in mc:
383 if f not in mc:
383 files.add(f)
384 files.add(f)
384 for f in mc:
385 for f in mc:
385 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
386 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
386 None):
387 None):
387 files.add(f)
388 files.add(f)
388
389
389 toupload = toupload.union(set([ctx[f].data().strip() for f\
390 toupload = toupload.union(set([ctx[f].data().strip() for f\
390 in files if lfutil.isstandin(f) and f in ctx]))
391 in files if lfutil.isstandin(f) and f in ctx]))
391 lfcommands.uploadlfiles(ui, self, remote, toupload)
392 lfcommands.uploadlfiles(ui, self, remote, toupload)
392 return super(lfiles_repo, self).push(remote, force, revs,
393 return super(lfiles_repo, self).push(remote, force, revs,
393 newbranch)
394 newbranch)
394
395
395 repo.__class__ = lfiles_repo
396 repo.__class__ = lfiles_repo
396
397
397 def checkrequireslfiles(ui, repo, **kwargs):
398 def checkrequireslfiles(ui, repo, **kwargs):
398 if 'largefiles' not in repo.requirements and lfutil.any_(
399 if 'largefiles' not in repo.requirements and lfutil.any_(
399 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
400 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
400 # work around bug in mercurial 1.9 whereby requirements is a list
401 # workaround bug in Mercurial 1.9 whereby requirements is
401 # on newly-cloned repos
402 # a list on newly-cloned repos
402 repo.requirements = set(repo.requirements)
403 repo.requirements = set(repo.requirements)
403
404
404 repo.requirements |= set(['largefiles'])
405 repo.requirements |= set(['largefiles'])
405 repo._writerequirements()
406 repo._writerequirements()
406
407
407 checkrequireslfiles(ui, repo)
408 checkrequireslfiles(ui, repo)
408
409
409 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
410 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
410 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
411 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,138 +1,138 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles extension: uisetup'''
9 '''setup for largefiles extension: uisetup'''
10
10
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httprepo, localrepo, sshrepo, sshserver, util, wireproto
12 httprepo, localrepo, sshrepo, sshserver, util, wireproto
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.hgweb import hgweb_mod, protocol
14 from mercurial.hgweb import hgweb_mod, protocol
15
15
16 import overrides
16 import overrides
17 import proto
17 import proto
18
18
19 def uisetup(ui):
19 def uisetup(ui):
20 # Disable auto-status for some commands which assume that all
20 # Disable auto-status for some commands which assume that all
21 # files in the result are under Mercurial's control
21 # files in the result are under Mercurial's control
22
22
23 entry = extensions.wrapcommand(commands.table, 'add',
23 entry = extensions.wrapcommand(commands.table, 'add',
24 overrides.override_add)
24 overrides.override_add)
25 addopt = [('', 'large', None, _('add as largefile')),
25 addopt = [('', 'large', None, _('add as largefile')),
26 ('', 'lfsize', '', _('add all files above this size (in megabytes)'
26 ('', 'lfsize', '', _('add all files above this size (in megabytes)'
27 'as largefiles (default: 10)'))]
27 'as largefiles (default: 10)'))]
28 entry[1].extend(addopt)
28 entry[1].extend(addopt)
29
29
30 entry = extensions.wrapcommand(commands.table, 'addremove',
30 entry = extensions.wrapcommand(commands.table, 'addremove',
31 overrides.override_addremove)
31 overrides.override_addremove)
32 entry = extensions.wrapcommand(commands.table, 'remove',
32 entry = extensions.wrapcommand(commands.table, 'remove',
33 overrides.override_remove)
33 overrides.override_remove)
34 entry = extensions.wrapcommand(commands.table, 'forget',
34 entry = extensions.wrapcommand(commands.table, 'forget',
35 overrides.override_forget)
35 overrides.override_forget)
36 entry = extensions.wrapcommand(commands.table, 'status',
36 entry = extensions.wrapcommand(commands.table, 'status',
37 overrides.override_status)
37 overrides.override_status)
38 entry = extensions.wrapcommand(commands.table, 'log',
38 entry = extensions.wrapcommand(commands.table, 'log',
39 overrides.override_log)
39 overrides.override_log)
40 entry = extensions.wrapcommand(commands.table, 'rollback',
40 entry = extensions.wrapcommand(commands.table, 'rollback',
41 overrides.override_rollback)
41 overrides.override_rollback)
42 entry = extensions.wrapcommand(commands.table, 'verify',
42 entry = extensions.wrapcommand(commands.table, 'verify',
43 overrides.override_verify)
43 overrides.override_verify)
44
44
45 verifyopt = [('', 'large', None, _('verify largefiles')),
45 verifyopt = [('', 'large', None, _('verify largefiles')),
46 ('', 'lfa', None,
46 ('', 'lfa', None,
47 _('verify all revisions of largefiles not just current')),
47 _('verify all revisions of largefiles not just current')),
48 ('', 'lfc', None,
48 ('', 'lfc', None,
49 _('verify largefile contents not just existence'))]
49 _('verify largefile contents not just existence'))]
50 entry[1].extend(verifyopt)
50 entry[1].extend(verifyopt)
51
51
52 entry = extensions.wrapcommand(commands.table, 'outgoing',
52 entry = extensions.wrapcommand(commands.table, 'outgoing',
53 overrides.override_outgoing)
53 overrides.override_outgoing)
54 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
54 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
55 entry[1].extend(outgoingopt)
55 entry[1].extend(outgoingopt)
56 entry = extensions.wrapcommand(commands.table, 'summary',
56 entry = extensions.wrapcommand(commands.table, 'summary',
57 overrides.override_summary)
57 overrides.override_summary)
58 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
58 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
59 entry[1].extend(summaryopt)
59 entry[1].extend(summaryopt)
60
60
61 entry = extensions.wrapcommand(commands.table, 'update',
61 entry = extensions.wrapcommand(commands.table, 'update',
62 overrides.override_update)
62 overrides.override_update)
63 entry = extensions.wrapcommand(commands.table, 'pull',
63 entry = extensions.wrapcommand(commands.table, 'pull',
64 overrides.override_pull)
64 overrides.override_pull)
65 entry = extensions.wrapfunction(filemerge, 'filemerge',
65 entry = extensions.wrapfunction(filemerge, 'filemerge',
66 overrides.override_filemerge)
66 overrides.override_filemerge)
67 entry = extensions.wrapfunction(cmdutil, 'copy',
67 entry = extensions.wrapfunction(cmdutil, 'copy',
68 overrides.override_copy)
68 overrides.override_copy)
69
69
70 # Backout calls revert so we need to override both the command and the
70 # Backout calls revert so we need to override both the command and the
71 # function
71 # function
72 entry = extensions.wrapcommand(commands.table, 'revert',
72 entry = extensions.wrapcommand(commands.table, 'revert',
73 overrides.override_revert)
73 overrides.override_revert)
74 entry = extensions.wrapfunction(commands, 'revert',
74 entry = extensions.wrapfunction(commands, 'revert',
75 overrides.override_revert)
75 overrides.override_revert)
76
76
77 # clone uses hg._update instead of hg.update even though they are the
77 # clone uses hg._update instead of hg.update even though they are the
78 # same function... so wrap both of them)
78 # same function... so wrap both of them)
79 extensions.wrapfunction(hg, 'update', overrides.hg_update)
79 extensions.wrapfunction(hg, 'update', overrides.hg_update)
80 extensions.wrapfunction(hg, '_update', overrides.hg_update)
80 extensions.wrapfunction(hg, '_update', overrides.hg_update)
81 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
81 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
82 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
82 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
83
83
84 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
84 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
85 if util.safehasattr(cmdutil, 'bailifchanged'):
85 if util.safehasattr(cmdutil, 'bailifchanged'):
86 extensions.wrapfunction(cmdutil, 'bailifchanged',
86 extensions.wrapfunction(cmdutil, 'bailifchanged',
87 overrides.override_bailifchanged)
87 overrides.override_bailifchanged)
88 else:
88 else:
89 extensions.wrapfunction(cmdutil, 'bail_if_changed',
89 extensions.wrapfunction(cmdutil, 'bail_if_changed',
90 overrides.override_bailifchanged)
90 overrides.override_bailifchanged)
91
91
92 # create the new wireproto commands ...
92 # create the new wireproto commands ...
93 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
93 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
94 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
94 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
95 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
95 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
96
96
97 # ... and wrap some existing ones
97 # ... and wrap some existing ones
98 wireproto.commands['capabilities'] = (proto.capabilities, '')
98 wireproto.commands['capabilities'] = (proto.capabilities, '')
99 wireproto.commands['heads'] = (proto.heads, '')
99 wireproto.commands['heads'] = (proto.heads, '')
100 wireproto.commands['lheads'] = (wireproto.heads, '')
100 wireproto.commands['lheads'] = (wireproto.heads, '')
101
101
102 # make putlfile behave the same as push and {get,stat}lfile behave the same
102 # make putlfile behave the same as push and {get,stat}lfile behave the same
103 # as pull w.r.t. permissions checks
103 # as pull w.r.t. permissions checks
104 hgweb_mod.perms['putlfile'] = 'push'
104 hgweb_mod.perms['putlfile'] = 'push'
105 hgweb_mod.perms['getlfile'] = 'pull'
105 hgweb_mod.perms['getlfile'] = 'pull'
106 hgweb_mod.perms['statlfile'] = 'pull'
106 hgweb_mod.perms['statlfile'] = 'pull'
107
107
108 # the hello wireproto command uses wireproto.capabilities, so it won't see
108 # the hello wireproto command uses wireproto.capabilities, so it won't see
109 # our largefiles capability unless we replace the actual function as well.
109 # our largefiles capability unless we replace the actual function as well.
110 proto.capabilities_orig = wireproto.capabilities
110 proto.capabilities_orig = wireproto.capabilities
111 wireproto.capabilities = proto.capabilities
111 wireproto.capabilities = proto.capabilities
112
112
113 # these let us reject non-lfiles clients and make them display our error
113 # these let us reject non-largefiles clients and make them display
114 # messages
114 # our error messages
115 protocol.webproto.refuseclient = proto.webproto_refuseclient
115 protocol.webproto.refuseclient = proto.webproto_refuseclient
116 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
116 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
117
117
118 # can't do this in reposetup because it needs to have happened before
118 # can't do this in reposetup because it needs to have happened before
119 # wirerepo.__init__ is called
119 # wirerepo.__init__ is called
120 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
120 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
121 proto.http_oldcallstream = httprepo.httprepository._callstream
121 proto.http_oldcallstream = httprepo.httprepository._callstream
122 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
122 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
123 httprepo.httprepository._callstream = proto.httprepo_callstream
123 httprepo.httprepository._callstream = proto.httprepo_callstream
124
124
125 # don't die on seeing a repo with the largefiles requirement
125 # don't die on seeing a repo with the largefiles requirement
126 localrepo.localrepository.supported |= set(['largefiles'])
126 localrepo.localrepository.supported |= set(['largefiles'])
127
127
128 # override some extensions' stuff as well
128 # override some extensions' stuff as well
129 for name, module in extensions.extensions():
129 for name, module in extensions.extensions():
130 if name == 'fetch':
130 if name == 'fetch':
131 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
131 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
132 overrides.override_fetch)
132 overrides.override_fetch)
133 if name == 'purge':
133 if name == 'purge':
134 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
134 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
135 overrides.override_purge)
135 overrides.override_purge)
136 if name == 'rebase':
136 if name == 'rebase':
137 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
137 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
138 overrides.override_rebase)
138 overrides.override_rebase)
@@ -1,29 +1,29 b''
1 # Copyright 2010-2011 Fog Creek Software
1 # Copyright 2010-2011 Fog Creek Software
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 '''largefile store working over mercurial's wire protocol'''
6 '''largefile store working over Mercurial's wire protocol'''
7
7
8 import lfutil
8 import lfutil
9 import remotestore
9 import remotestore
10
10
11 class wirestore(remotestore.remotestore):
11 class wirestore(remotestore.remotestore):
12 def __init__(self, ui, repo, remote):
12 def __init__(self, ui, repo, remote):
13 cap = remote.capable('largefiles')
13 cap = remote.capable('largefiles')
14 if not cap:
14 if not cap:
15 raise lfutil.storeprotonotcapable([])
15 raise lfutil.storeprotonotcapable([])
16 storetypes = cap.split(',')
16 storetypes = cap.split(',')
17 if not 'serve' in storetypes:
17 if not 'serve' in storetypes:
18 raise lfutil.storeprotonotcapable(storetypes)
18 raise lfutil.storeprotonotcapable(storetypes)
19 self.remote = remote
19 self.remote = remote
20 super(wirestore, self).__init__(ui, repo, remote.url())
20 super(wirestore, self).__init__(ui, repo, remote.url())
21
21
22 def _put(self, hash, fd):
22 def _put(self, hash, fd):
23 return self.remote.putlfile(hash, fd)
23 return self.remote.putlfile(hash, fd)
24
24
25 def _get(self, hash):
25 def _get(self, hash):
26 return self.remote.getlfile(hash)
26 return self.remote.getlfile(hash)
27
27
28 def _stat(self, hash):
28 def _stat(self, hash):
29 return self.remote.statlfile(hash)
29 return self.remote.statlfile(hash)
General Comments 0
You need to be logged in to leave comments. Login now