largefiles: cosmetics, whitespace, code style...
Greg Ward
r15255:7ab05d75 default
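The bulk of this change replaces trailing-backslash line continuations with parenthesized expressions. A minimal, self-contained sketch of the before/after pattern (the variables here are placeholders, not names from this diff):

    first_part, second_part = 1, 2

    # Before: a trailing backslash continues the line, but any whitespace
    # after the backslash is a syntax error.
    total = first_part + \
            second_part

    # After: parentheses give implicit continuation and tolerate trailing
    # whitespace; this is the style the commit applies below.
    total = (first_part +
             second_part)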
@@ -1,202 +1,202 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''base class for store implementations and store-related utility code'''

 import os
 import tempfile
 import binascii
 import re

 from mercurial import util, node, hg
 from mercurial.i18n import _

 import lfutil

 class StoreError(Exception):
     '''Raised when there is a problem getting files from or putting
     files to a central store.'''
     def __init__(self, filename, hash, url, detail):
         self.filename = filename
         self.hash = hash
         self.url = url
         self.detail = detail

     def longmessage(self):
         if self.url:
             return ('%s: %s\n'
                     '(failed URL: %s)\n'
                     % (self.filename, self.detail, self.url))
         else:
             return ('%s: %s\n'
                     '(no default or default-push path set in hgrc)\n'
                     % (self.filename, self.detail))

     def __str__(self):
         return "%s: %s" % (self.url, self.detail)

 class basestore(object):
     def __init__(self, ui, repo, url):
         self.ui = ui
         self.repo = repo
         self.url = url

     def put(self, source, hash):
         '''Put source file into the store under <filename>/<hash>.'''
         raise NotImplementedError('abstract method')

     def exists(self, hash):
         '''Check to see if the store contains the given hash.'''
         raise NotImplementedError('abstract method')

     def get(self, files):
         '''Get the specified largefiles from the store and write to local
         files under repo.root. files is a list of (filename, hash)
         tuples. Return (success, missing), lists of files successfully
         downloaded and those not found in the store. success is a list
         of (filename, hash) tuples; missing is a list of filenames that
         we could not get. (The detailed error message will already have
         been presented to the user, so missing is just supplied as a
         summary.)'''
         success = []
         missing = []
         ui = self.ui

         at = 0
         for filename, hash in files:
             ui.progress(_('getting largefiles'), at, unit='lfile',
                 total=len(files))
             at += 1
             ui.note(_('getting %s:%s\n') % (filename, hash))

             cachefilename = lfutil.cachepath(self.repo, hash)
             cachedir = os.path.dirname(cachefilename)

             # No need to pass mode='wb' to fdopen(), since mkstemp() already
             # opened the file in binary mode.
             (tmpfd, tmpfilename) = tempfile.mkstemp(
                 dir=cachedir, prefix=os.path.basename(filename))
             tmpfile = os.fdopen(tmpfd, 'w')

             try:
                 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
             except StoreError, err:
                 ui.warn(err.longmessage())
                 hhash = ""

             if hhash != hash:
                 if hhash != "":
                     ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                             % (filename, hash, hhash))
                 tmpfile.close() # no-op if it's already closed
                 os.remove(tmpfilename)
                 missing.append(filename)
                 continue

             if os.path.exists(cachefilename): # Windows
                 os.remove(cachefilename)
             os.rename(tmpfilename, cachefilename)
             lfutil.linktosystemcache(self.repo, hash)
             success.append((filename, hhash))

         ui.progress(_('getting largefiles'), None)
         return (success, missing)

     def verify(self, revs, contents=False):
         '''Verify the existence (and, optionally, contents) of every big
         file revision referenced by every changeset in revs.
         Return 0 if all is well, non-zero on any errors.'''
         write = self.ui.write
         failed = False

         write(_('searching %d changesets for largefiles\n') % len(revs))
         verified = set() # set of (filename, filenode) tuples

         for rev in revs:
             cctx = self.repo[rev]
             cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

             failed = lfutil.any_(self._verifyfile(
                 cctx, cset, contents, standin, verified) for standin in cctx)

         num_revs = len(verified)
         num_lfiles = len(set([fname for (fname, fnode) in verified]))
         if contents:
             write(_('verified contents of %d revisions of %d largefiles\n')
                   % (num_revs, num_lfiles))
         else:
             write(_('verified existence of %d revisions of %d largefiles\n')
                   % (num_revs, num_lfiles))

         return int(failed)

     def _getfile(self, tmpfile, filename, hash):
         '''Fetch one revision of one file from the store and write it
         to tmpfile. Compute the hash of the file on-the-fly as it
         downloads and return the binary hash. Close tmpfile. Raise
         StoreError if unable to download the file (e.g. it does not
         exist in the store).'''
         raise NotImplementedError('abstract method')

     def _verifyfile(self, cctx, cset, contents, standin, verified):
         '''Perform the actual verification of a file in the store.
         '''
         raise NotImplementedError('abstract method')

 import localstore, wirestore

 _storeprovider = {
     'file': [localstore.localstore],
     'http': [wirestore.wirestore],
     'https': [wirestore.wirestore],
     'ssh': [wirestore.wirestore],
     }

 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

 # During clone this function is passed the src's ui object
 # but it needs the dest's ui object so it can read out of
 # the config file. Use repo.ui instead.
 def _openstore(repo, remote=None, put=False):
     ui = repo.ui

     if not remote:
-        path = getattr(repo, 'lfpullsource', None) or \
-               ui.expandpath('default-push', 'default')
+        path = (getattr(repo, 'lfpullsource', None) or
+                ui.expandpath('default-push', 'default'))

         # ui.expandpath() leaves 'default-push' and 'default' alone if
         # they cannot be expanded: fallback to the empty string,
         # meaning the current directory.
         if path == 'default-push' or path == 'default':
             path = ''
             remote = repo
         else:
             remote = hg.peer(repo, {}, path)

     # The path could be a scheme so use Mercurial's normal functionality
     # to resolve the scheme to a repository and use its path
     path = util.safehasattr(remote, 'url') and remote.url() or remote.path

     match = _scheme_re.match(path)
     if not match: # regular filesystem path
         scheme = 'file'
     else:
         scheme = match.group(1)

     try:
         storeproviders = _storeprovider[scheme]
     except KeyError:
         raise util.Abort(_('unsupported URL scheme %r') % scheme)

     for class_obj in storeproviders:
         try:
             return class_obj(ui, repo, remote)
         except lfutil.storeprotonotcapable:
             pass

     raise util.Abort(_('%s does not appear to be a largefile store'), path)
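
_openstore() above resolves the remote's URL to a scheme, looks the scheme up in _storeprovider, and returns the first provider class that does not raise storeprotonotcapable. A self-contained sketch of that dispatch pattern, with illustrative names (dummystore, _providers, and openstore are stand-ins, not Mercurial's API):

    import re

    _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

    class dummystore(object):
        # stands in for localstore.localstore / wirestore.wirestore
        def __init__(self, url):
            self.url = url

    _providers = {
        'file': [dummystore],
        'http': [dummystore],
        'https': [dummystore],
        'ssh': [dummystore],
    }

    def openstore(url):
        m = _scheme_re.match(url)
        scheme = m.group(1) if m else 'file'  # bare paths mean the filesystem
        if scheme not in _providers:
            raise ValueError('unsupported URL scheme %r' % scheme)
        # first provider willing to handle the URL wins
        for cls in _providers[scheme]:
            return cls(url)

    print(openstore('ssh://example.com/repo').url)

Keeping a list per scheme lets several transports compete for the same URL, which is why the loop tries each class in turn instead of indexing a single provider.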
@@ -1,482 +1,481 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''High-level command function for lfconvert, plus the cmdtable.'''

 import os
 import shutil

 from mercurial import util, match as match_, hg, node, context, error
 from mercurial.i18n import _

 import lfutil
 import basestore

 # -- Commands ----------------------------------------------------------

 def lfconvert(ui, src, dest, *pats, **opts):
     '''convert a normal repository to a largefiles repository

     Convert repository SOURCE to a new repository DEST, identical to
     SOURCE except that certain files will be converted as largefiles:
     specifically, any file that matches any PATTERN *or* whose size is
     above the minimum size threshold is converted as a largefile. The
     size used to determine whether or not to track a file as a
     largefile is the size of the first version of the file. The
     minimum size can be specified either with --size or in
     configuration as ``largefiles.size``.

     After running this command you will need to make sure that
     largefiles is enabled anywhere you intend to push the new
     repository.

     Use --tonormal to convert largefiles back to normal files; after
     this, the DEST repository can be used without largefiles at all.'''

     if opts['tonormal']:
         tolfile = False
     else:
         tolfile = True
         size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
     try:
         rsrc = hg.repository(ui, src)
         if not rsrc.local():
             raise util.Abort(_('%s is not a local Mercurial repo') % src)
     except error.RepoError, err:
         ui.traceback()
         raise util.Abort(err.args[0])
     if os.path.exists(dest):
         if not os.path.isdir(dest):
             raise util.Abort(_('destination %s already exists') % dest)
         elif os.listdir(dest):
             raise util.Abort(_('destination %s is not empty') % dest)
     try:
         ui.status(_('initializing destination %s\n') % dest)
         rdst = hg.repository(ui, dest, create=True)
         if not rdst.local():
             raise util.Abort(_('%s is not a local Mercurial repo') % dest)
     except error.RepoError:
         ui.traceback()
         raise util.Abort(_('%s is not a repo') % dest)

     success = False
     try:
         # Lock destination to prevent modification while it is converted to.
         # Don't need to lock src because we are just reading from its history
         # which can't change.
         dst_lock = rdst.lock()

         # Get a list of all changesets in the source. The easy way to do this
         # is to simply walk the changelog, using changelog.nodesbetween().
         # Take a look at mercurial/revlog.py:639 for more details.
         # Use a generator instead of a list to decrease memory usage
         ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
             rsrc.heads())[0])
         revmap = {node.nullid: node.nullid}
         if tolfile:
             lfiles = set()
             normalfiles = set()
             if not pats:
                 pats = ui.config(lfutil.longname, 'patterns', default=())
                 if pats:
                     pats = pats.split(' ')
             if pats:
                 matcher = match_.match(rsrc.root, '', list(pats))
             else:
                 matcher = None

             lfiletohash = {}
             for ctx in ctxs:
                 ui.progress(_('converting revisions'), ctx.rev(),
                     unit=_('revision'), total=rsrc['tip'].rev())
                 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                     lfiles, normalfiles, matcher, size, lfiletohash)
             ui.progress(_('converting revisions'), None)

             if os.path.exists(rdst.wjoin(lfutil.shortname)):
                 shutil.rmtree(rdst.wjoin(lfutil.shortname))

             for f in lfiletohash.keys():
                 if os.path.isfile(rdst.wjoin(f)):
                     os.unlink(rdst.wjoin(f))
                 try:
                     os.removedirs(os.path.dirname(rdst.wjoin(f)))
                 except OSError:
                     pass

         else:
             for ctx in ctxs:
                 ui.progress(_('converting revisions'), ctx.rev(),
                     unit=_('revision'), total=rsrc['tip'].rev())
                 _addchangeset(ui, rsrc, rdst, ctx, revmap)

             ui.progress(_('converting revisions'), None)
         success = True
     finally:
         if not success:
             # we failed, remove the new directory
             shutil.rmtree(rdst.root)
         dst_lock.release()

 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
     # Convert src parents to dst parents
     parents = []
     for p in ctx.parents():
         parents.append(revmap[p.node()])
     while len(parents) < 2:
         parents.append(node.nullid)

     # Generate list of changed files
     files = set(ctx.files())
     if node.nullid not in parents:
         mc = ctx.manifest()
         mp1 = ctx.parents()[0].manifest()
         mp2 = ctx.parents()[1].manifest()
         files |= (set(mp1) | set(mp2)) - set(mc)
         for f in mc:
             if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                 files.add(f)

     def getfilectx(repo, memctx, f):
         if lfutil.standin(f) in files:
             # if the file isn't in the manifest then it was removed
             # or renamed, raise IOError to indicate this
             try:
                 fctx = ctx.filectx(lfutil.standin(f))
             except error.LookupError:
                 raise IOError()
             renamed = fctx.renamed()
             if renamed:
                 renamed = lfutil.splitstandin(renamed[0])

             hash = fctx.data().strip()
             path = lfutil.findfile(rsrc, hash)
             ### TODO: What if the file is not cached?
             data = ''
             fd = None
             try:
                 fd = open(path, 'rb')
                 data = fd.read()
             finally:
                 if fd:
                     fd.close()
             return context.memfilectx(f, data, 'l' in fctx.flags(),
                 'x' in fctx.flags(), renamed)
         else:
             try:
                 fctx = ctx.filectx(f)
             except error.LookupError:
                 raise IOError()
             renamed = fctx.renamed()
             if renamed:
                 renamed = renamed[0]
             data = fctx.data()
             if f == '.hgtags':
                 newdata = []
                 for line in data.splitlines():
                     id, name = line.split(' ', 1)
                     newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                         name))
                 data = ''.join(newdata)
             return context.memfilectx(f, data, 'l' in fctx.flags(),
                 'x' in fctx.flags(), renamed)

     dstfiles = []
     for file in files:
         if lfutil.isstandin(file):
             dstfiles.append(lfutil.splitstandin(file))
         else:
             dstfiles.append(file)
     # Commit
     mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
         getfilectx, ctx.user(), ctx.date(), ctx.extra())
     ret = rdst.commitctx(mctx)
     rdst.dirstate.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()

 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
         matcher, size, lfiletohash):
     # Convert src parents to dst parents
     parents = []
     for p in ctx.parents():
         parents.append(revmap[p.node()])
     while len(parents) < 2:
         parents.append(node.nullid)

     # Generate list of changed files
     files = set(ctx.files())
     if node.nullid not in parents:
         mc = ctx.manifest()
         mp1 = ctx.parents()[0].manifest()
         mp2 = ctx.parents()[1].manifest()
         files |= (set(mp1) | set(mp2)) - set(mc)
         for f in mc:
             if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                 files.add(f)

     dstfiles = []
     for f in files:
         if f not in lfiles and f not in normalfiles:
             islfile = _islfile(f, ctx, matcher, size)
             # If this file was renamed or copied then copy
             # the lfileness of its predecessor
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
                 renamed = fctx.renamed()
                 renamedlfile = renamed and renamed[0] in lfiles
                 islfile |= renamedlfile
                 if 'l' in fctx.flags():
                     if renamedlfile:
                         raise util.Abort(
                             _('Renamed/copied largefile %s becomes symlink')
                             % f)
                     islfile = False
             if islfile:
                 lfiles.add(f)
             else:
                 normalfiles.add(f)

         if f in lfiles:
             dstfiles.append(lfutil.standin(f))
             # largefile in manifest if it has not been removed/renamed
             if f in ctx.manifest():
                 if 'l' in ctx.filectx(f).flags():
                     if renamed and renamed[0] in lfiles:
                         raise util.Abort(_('largefile %s becomes symlink') % f)

                 # largefile was modified, update standins
                 fullpath = rdst.wjoin(f)
                 lfutil.createdir(os.path.dirname(fullpath))
                 m = util.sha1('')
                 m.update(ctx[f].data())
                 hash = m.hexdigest()
                 if f not in lfiletohash or lfiletohash[f] != hash:
                     try:
                         fd = open(fullpath, 'wb')
                         fd.write(ctx[f].data())
                     finally:
                         if fd:
                             fd.close()
                     executable = 'x' in ctx[f].flags()
                     os.chmod(fullpath, lfutil.getmode(executable))
                     lfutil.writestandin(rdst, lfutil.standin(f), hash,
                         executable)
                     lfiletohash[f] = hash
         else:
             # normal file
             dstfiles.append(f)

     def getfilectx(repo, memctx, f):
         if lfutil.isstandin(f):
             # if the file isn't in the manifest then it was removed
             # or renamed, raise IOError to indicate this
             srcfname = lfutil.splitstandin(f)
             try:
                 fctx = ctx.filectx(srcfname)
             except error.LookupError:
                 raise IOError()
             renamed = fctx.renamed()
             if renamed:
                 # standin is always a largefile because largefile-ness
                 # doesn't change after rename or copy
                 renamed = lfutil.standin(renamed[0])

             return context.memfilectx(f, lfiletohash[srcfname], 'l' in
                 fctx.flags(), 'x' in fctx.flags(), renamed)
         else:
             try:
                 fctx = ctx.filectx(f)
             except error.LookupError:
                 raise IOError()
             renamed = fctx.renamed()
             if renamed:
                 renamed = renamed[0]

             data = fctx.data()
             if f == '.hgtags':
                 newdata = []
                 for line in data.splitlines():
                     id, name = line.split(' ', 1)
                     newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                         name))
                 data = ''.join(newdata)
             return context.memfilectx(f, data, 'l' in fctx.flags(),
                 'x' in fctx.flags(), renamed)

     # Commit
     mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
         getfilectx, ctx.user(), ctx.date(), ctx.extra())
     ret = rdst.commitctx(mctx)
     rdst.dirstate.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()

 def _islfile(file, ctx, matcher, size):
     '''Return true if file should be considered a largefile, i.e.
     matcher matches it or it is larger than size.'''
     # never store special .hg* files as largefiles
     if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
         return False
     if matcher and matcher(file):
         return True
     try:
         return ctx.filectx(file).size() >= size * 1024 * 1024
     except error.LookupError:
         return False

 def uploadlfiles(ui, rsrc, rdst, files):
     '''upload largefiles to the central store'''

     # Don't upload locally. All largefiles are in the system wide cache
     # so the other repo can just get them from there.
     if not files or rdst.local():
         return

     store = basestore._openstore(rsrc, rdst, put=True)

     at = 0
     files = filter(lambda h: not store.exists(h), files)
     for hash in files:
         ui.progress(_('uploading largefiles'), at, unit='largefile',
             total=len(files))
         source = lfutil.findfile(rsrc, hash)
         if not source:
             raise util.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
         # XXX check for errors here
         store.put(source, hash)
         at += 1
     ui.progress(_('uploading largefiles'), None)

 def verifylfiles(ui, repo, all=False, contents=False):
     '''Verify that every big file revision in the current changeset
     exists in the central store. With --contents, also verify that
     the contents of each big file revision are correct (SHA-1 hash
     matches the revision ID). With --all, check every changeset in
     this repository.'''
     if all:
         # Pass a list to the function rather than an iterator because we know a
         # list will work.
         revs = range(len(repo))
     else:
         revs = ['.']

     store = basestore._openstore(repo)
     return store.verify(revs, contents=contents)

 def cachelfiles(ui, repo, node):
     '''cachelfiles ensures that all largefiles needed by the specified revision
     are present in the repository's largefile cache.

     returns a tuple (cached, missing). cached is the list of files downloaded
     by this operation; missing is the list of files that were needed but could
     not be found.'''
     lfiles = lfutil.listlfiles(repo, node)
     toget = []

     for lfile in lfiles:
         expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
         # if it exists and its hash matches, it might have been locally
         # modified before updating and the user chose 'local'. in this case,
         # it will not be in any store, so don't look for it.
-        if (not os.path.exists(repo.wjoin(lfile)) \
-            or expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and \
-            not lfutil.findfile(repo, expectedhash):
+        if ((not os.path.exists(repo.wjoin(lfile)) or
+             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
+            not lfutil.findfile(repo, expectedhash)):
             toget.append((lfile, expectedhash))

     if toget:
         store = basestore._openstore(repo)
         ret = store.get(toget)
         return ret

     return ([], [])

 def updatelfiles(ui, repo, filelist=None, printmessage=True):
     wlock = repo.wlock()
     try:
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

         if filelist is not None:
             lfiles = [f for f in lfiles if f in filelist]

         printed = False
         if printmessage and lfiles:
             ui.status(_('getting changed largefiles\n'))
             printed = True
             cachelfiles(ui, repo, '.')

         updated, removed = 0, 0
         for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
             # increment the appropriate counter according to _updatelfile's
             # return value
             updated += i > 0 and i or 0
             removed -= i < 0 and i or 0
             if printmessage and (removed or updated) and not printed:
                 ui.status(_('getting changed largefiles\n'))
                 printed = True

         lfdirstate.write()
         if printed and printmessage:
             ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                 removed))
     finally:
         wlock.release()

 def _updatelfile(repo, lfdirstate, lfile):
     '''updates a single largefile and copies the state of its standin from
     the repository's dirstate to its state in the lfdirstate.

     returns 1 if the file was modified, -1 if the file was removed, 0 if the
     file was unchanged, and None if the needed largefile was missing from the
     cache.'''
     ret = 0
     abslfile = repo.wjoin(lfile)
     absstandin = repo.wjoin(lfutil.standin(lfile))
     if os.path.exists(absstandin):
         if os.path.exists(absstandin+'.orig'):
             shutil.copyfile(abslfile, abslfile+'.orig')
         expecthash = lfutil.readstandin(repo, lfile)
-        if expecthash != '' and \
-                (not os.path.exists(abslfile) or \
-                expecthash != lfutil.hashfile(abslfile)):
+        if (expecthash != '' and
+            (not os.path.exists(abslfile) or
+             expecthash != lfutil.hashfile(abslfile))):
             if not lfutil.copyfromcache(repo, expecthash, lfile):
                 return None # don't try to set the mode or update the dirstate
             ret = 1
         mode = os.stat(absstandin).st_mode
         if mode != os.stat(abslfile).st_mode:
             os.chmod(abslfile, mode)
             ret = 1
     else:
         if os.path.exists(abslfile):
             os.unlink(abslfile)
             ret = -1
     state = repo.dirstate[lfutil.standin(lfile)]
     if state == 'n':
         lfdirstate.normal(lfile)
     elif state == 'r':
         lfdirstate.remove(lfile)
     elif state == 'a':
         lfdirstate.add(lfile)
     elif state == '?':
         lfdirstate.drop(lfile)
     return ret

 # -- hg commands declarations ------------------------------------------------

-
 cmdtable = {
     'lfconvert': (lfconvert,
                   [('s', 'size', '',
                     _('minimum size (MB) for files to be converted '
                       'as largefiles'),
                     'SIZE'),
                    ('', 'tonormal', False,
                     _('convert from a largefiles repo to a normal repo')),
                    ],
                   _('hg lfconvert SOURCE DEST [FILE ...]')),
     }
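
In _lfconvert_addchangeset() above, each largefile is keyed by the SHA-1 of its contents (via util.sha1), and the hex digest is what gets committed as the standin's contents under the .hglf/ directory. A hedged standard-library sketch of that bookkeeping (standinhash and the sample payload are illustrative, not part of this diff):

    import hashlib

    def standinhash(data):
        # same digest that util.sha1('') + update() produces in the code above
        m = hashlib.sha1()
        m.update(data)
        return m.hexdigest()

    payload = b'example largefile contents'
    print(standinhash(payload))  # 40-char hex id recorded in the standin file

Because the standin only holds this 40-character id, the history stays small while the actual payload lives in the per-repo and system-wide caches managed by lfutil below.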
@@ -1,446 +1,448 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''largefiles utility code: must not import other modules in this package.'''

 import os
 import errno
 import shutil
 import stat
 import hashlib

 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
 from mercurial.i18n import _

 shortname = '.hglf'
 longname = 'largefiles'


 # -- Portability wrappers ----------------------------------------------

 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
     return dirstate.walk(matcher, [], unknown, ignored)

 def repo_add(repo, list):
     add = repo[None].add
     return add(list)

 def repo_remove(repo, list, unlink=False):
     def remove(list, unlink):
         wlock = repo.wlock()
         try:
             if unlink:
                 for f in list:
                     try:
                         util.unlinkpath(repo.wjoin(f))
                     except OSError, inst:
                         if inst.errno != errno.ENOENT:
                             raise
             repo[None].forget(list)
         finally:
             wlock.release()
     return remove(list, unlink=unlink)

 def repo_forget(repo, list):
     forget = repo[None].forget
     return forget(list)

 def findoutgoing(repo, remote, force):
     from mercurial import discovery
     common, _anyinc, _heads = discovery.findcommonincoming(repo,
         remote, force=force)
     return repo.changelog.findmissing(common)

 # -- Private worker functions ------------------------------------------

 def getminsize(ui, assumelfiles, opt, default=10):
     lfsize = opt
     if not lfsize and assumelfiles:
         lfsize = ui.config(longname, 'size', default=default)
     if lfsize:
         try:
             lfsize = float(lfsize)
         except ValueError:
             raise util.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
     if lfsize is None:
         raise util.Abort(_('minimum size for largefiles must be specified'))
     return lfsize

 def link(src, dest):
     try:
         util.oslink(src, dest)
     except OSError:
         # if hardlinks fail, fallback on copy
         shutil.copyfile(src, dest)
         os.chmod(dest, os.stat(src).st_mode)

 def systemcachepath(ui, hash):
     path = ui.config(longname, 'systemcache', None)
     if path:
         path = os.path.join(path, hash)
     else:
         if os.name == 'nt':
-            path = os.path.join(os.getenv('LOCALAPPDATA') or \
-                os.getenv('APPDATA'), longname, hash)
+            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
+            path = os.path.join(appdata, longname, hash)
         elif os.name == 'posix':
             path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
         else:
             raise util.Abort(_('unknown operating system: %s\n') % os.name)
     return path

 def insystemcache(ui, hash):
     return os.path.exists(systemcachepath(ui, hash))

 def findfile(repo, hash):
     if incache(repo, hash):
         repo.ui.note(_('Found %s in cache\n') % hash)
         return cachepath(repo, hash)
     if insystemcache(repo.ui, hash):
         repo.ui.note(_('Found %s in system cache\n') % hash)
         return systemcachepath(repo.ui, hash)
     return None

 class largefiles_dirstate(dirstate.dirstate):
     def __getitem__(self, key):
         return super(largefiles_dirstate, self).__getitem__(unixpath(key))
     def normal(self, f):
         return super(largefiles_dirstate, self).normal(unixpath(f))
     def remove(self, f):
         return super(largefiles_dirstate, self).remove(unixpath(f))
     def add(self, f):
         return super(largefiles_dirstate, self).add(unixpath(f))
     def drop(self, f):
         return super(largefiles_dirstate, self).drop(unixpath(f))
     def forget(self, f):
         return super(largefiles_dirstate, self).forget(unixpath(f))

 def openlfdirstate(ui, repo):
     '''
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
     '''
     admin = repo.join(longname)
     opener = scmutil.opener(admin)
     if util.safehasattr(repo.dirstate, '_validate'):
         lfdirstate = largefiles_dirstate(opener, ui, repo.root,
             repo.dirstate._validate)
     else:
         lfdirstate = largefiles_dirstate(opener, ui, repo.root)

     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone. It also gives us an easy
     # way to forcibly rebuild largefiles state:
     #   rm .hg/largefiles/dirstate && hg status
     # Or even, if things are really messed up:
     #   rm -rf .hg/largefiles && hg status
     if not os.path.exists(os.path.join(admin, 'dirstate')):
         util.makedirs(admin)
         matcher = getstandinmatcher(repo)
         for standin in dirstate_walk(repo.dirstate, matcher):
             lfile = splitstandin(standin)
             hash = readstandin(repo, lfile)
             lfdirstate.normallookup(lfile)
             try:
                 if hash == hashfile(lfile):
                     lfdirstate.normal(lfile)
             except IOError, err:
                 if err.errno != errno.ENOENT:
                     raise

         lfdirstate.write()

     return lfdirstate

 def lfdirstate_status(lfdirstate, repo, rev):
     wlock = repo.wlock()
     try:
         match = match_.always(repo.root, repo.getcwd())
         s = lfdirstate.status(match, [], False, False, False)
         unsure, modified, added, removed, missing, unknown, ignored, clean = s
         for lfile in unsure:
             if repo[rev][standin(lfile)].data().strip() != \
                     hashfile(repo.wjoin(lfile)):
                 modified.append(lfile)
             else:
                 clean.append(lfile)
                 lfdirstate.normal(lfile)
         lfdirstate.write()
     finally:
         wlock.release()
     return (modified, added, removed, missing, unknown, ignored, clean)

 def listlfiles(repo, rev=None, matcher=None):
     '''return a list of largefiles in the working copy or the
     specified changeset'''

     if matcher is None:
         matcher = getstandinmatcher(repo)

     # ignore unknown files in working directory
-    return [splitstandin(f) for f in repo[rev].walk(matcher) \
-            if rev is not None or repo.dirstate[f] != '?']
+    return [splitstandin(f)
+            for f in repo[rev].walk(matcher)
+            if rev is not None or repo.dirstate[f] != '?']

 def incache(repo, hash):
     return os.path.exists(cachepath(repo, hash))

 def createdir(dir):
     if not os.path.exists(dir):
         os.makedirs(dir)

 def cachepath(repo, hash):
     return repo.join(os.path.join(longname, hash))

 def copyfromcache(repo, hash, filename):
     '''Copy the specified largefile from the repo or system cache to
     filename in the repository. Return true on success or false if the
203 file was not found in either cache (which should not happened:
204 file was not found in either cache (which should not happened:
204 this is meant to be called only after ensuring that the needed
205 this is meant to be called only after ensuring that the needed
205 largefile exists in the cache).'''
206 largefile exists in the cache).'''
206 path = findfile(repo, hash)
207 path = findfile(repo, hash)
207 if path is None:
208 if path is None:
208 return False
209 return False
209 util.makedirs(os.path.dirname(repo.wjoin(filename)))
210 util.makedirs(os.path.dirname(repo.wjoin(filename)))
210 shutil.copy(path, repo.wjoin(filename))
211 shutil.copy(path, repo.wjoin(filename))
211 return True
212 return True
212
213
213 def copytocache(repo, rev, file, uploaded=False):
214 def copytocache(repo, rev, file, uploaded=False):
214 hash = readstandin(repo, file)
215 hash = readstandin(repo, file)
215 if incache(repo, hash):
216 if incache(repo, hash):
216 return
217 return
217 copytocacheabsolute(repo, repo.wjoin(file), hash)
218 copytocacheabsolute(repo, repo.wjoin(file), hash)
218
219
219 def copytocacheabsolute(repo, file, hash):
220 def copytocacheabsolute(repo, file, hash):
220 createdir(os.path.dirname(cachepath(repo, hash)))
221 createdir(os.path.dirname(cachepath(repo, hash)))
221 if insystemcache(repo.ui, hash):
222 if insystemcache(repo.ui, hash):
222 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
223 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
223 else:
224 else:
224 shutil.copyfile(file, cachepath(repo, hash))
225 shutil.copyfile(file, cachepath(repo, hash))
225 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
226 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
226 linktosystemcache(repo, hash)
227 linktosystemcache(repo, hash)
227
228
228 def linktosystemcache(repo, hash):
229 def linktosystemcache(repo, hash):
229 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
230 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
230 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
231 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
231
232
def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
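
# A sketch of the pattern translation (assuming shortname is '.hglf', the
# standin directory used by this extension): invoked from the repository
# root, where repo.getcwd() == '',
#   getstandinmatcher(repo, ['*.bin'])
# builds its matcher over ['.hglf/*.bin'], i.e. the user's patterns are
# re-rooted inside the standin directory.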

def getmatcher(repo, pats=[], opts={}, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    match = scmutil.match(repo[None], pats, opts)

    if not showbad:
        match.bad = lambda f, msg: None
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composed_matchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')
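
# For example (again assuming shortname is '.hglf'):
#   standin('sub/big.dat')  -> '.hglf/sub/big.dat'
#   standin('sub\\big.dat') -> '.hglf/sub/big.dat'  (when os.sep is '\\')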

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None
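
# splitstandin() is the inverse of standin() (same assumption about
# shortname):
#   splitstandin('.hglf/sub/big.dat') -> 'sub/big.dat'
#   splitstandin('sub/big.dat')       -> None  (not under the standin dir)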

def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
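
# A minimal usage sketch (hypothetical caller; 'tmppath' and 'infile' are
# illustrative names):
#   outfile = open(tmppath, 'wb')       # closed by copyandhash() itself
#   binhash = copyandhash(blockstream(infile), outfile)
# Note the asymmetry: copyandhash() returns the binary digest, while
# hashfile() below returns a hex digest.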

def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

class limitreader(object):
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        length = length > self.limit and self.limit or length
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass
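
# Usage sketch (hypothetical): limitreader caps how much of an underlying
# stream a consumer may see, e.g. when several files arrive back to back on
# one connection and each is preceded by its length:
#   reader = limitreader(connection, filelen)
#   for data in blockstream(reader):
#       ...                          # never reads past filelen
# close() is deliberately a no-op so that closing the wrapper (as
# blockstream() does) leaves the underlying stream open.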

def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        data = infile.read(blocksize)
        if not data:
            break
        yield data
    # same blecch as copyandhash() above
    infile.close()

def readhash(filename):
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash

def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def getmode(executable):
    if executable:
        return 0755
    else:
        return 0644
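
# Taken together, getmode(getexecutable(f)) maps an existing file to the
# canonical permissions used for standins: 0755 if f is executable for
# user, group, and other, else 0644.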

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url
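
# For example:
#   urljoin('http://example.com/', '/largefiles', 'abc123')
# yields 'http://example.com/largefiles/abc123'. Only the slashes at the
# seams are normalized; the components themselves are not quoted or
# otherwise cleaned up.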
418
420
419 def hexsha1(data):
421 def hexsha1(data):
420 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
422 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
421 object data"""
423 object data"""
422 h = hashlib.sha1()
424 h = hashlib.sha1()
423 for chunk in util.filechunkiter(data):
425 for chunk in util.filechunkiter(data):
424 h.update(chunk)
426 h.update(chunk)
425 return h.hexdigest()
427 return h.hexdigest()
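
# Usage sketch (hypothetical caller):
#   fd = open(path, 'rb')
#   try:
#       hexhash = hexsha1(fd)
#   finally:
#       fd.close()
# Unlike blockstream(), util.filechunkiter() does not close the file, so
# the caller stays responsible for it.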

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return os.path.normpath(path).replace(os.sep, '/')
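
# For example, on Windows:
#   unixpath('foo\\.\\bar\\baz.dat') -> 'foo/bar/baz.dat'
# os.path.normpath() collapses '.' components and redundant separators
# before the separator is rewritten to '/'.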

def islfilesrepo(repo):
    return ('largefiles' in repo.requirements and
            any_(shortname + '/' in f[0] for f in repo.store.datafiles()))

def any_(gen):
    for x in gen:
        if x:
            return True
    return False
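
# any_() is a stand-in for the any() builtin, which is unavailable on the
# oldest Python versions Mercurial still supported at this point;
# islfilesrepo() above uses it to short-circuit over the store's datafiles.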

class storeprotonotcapable(BaseException):
    def __init__(self, storetypes):
        self.storetypes = storetypes
@@ -1,826 +1,830 b''
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import os
import copy

from mercurial import hg, commands, util, cmdutil, match as match_, node, \
    archival, error, merge
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase

try:
    from mercurial import scmutil
except ImportError:
    pass

import lfutil
import lfcommands

def installnormalfilesmatchfn(manifest):
    '''overrides scmutil.match so that the matcher it returns will ignore all
    largefiles'''
    oldmatch = None # for the closure
    def override_match(repo, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(repo, pats, opts, globbed, default)
        m = copy.copy(match)
        notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                manifest)
        m._files = filter(notlfile, m._files)
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
        return m
    oldmatch = installmatchfn(override_match)

def installmatchfn(f):
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch

def restorematchfn():
    '''restores scmutil.match to what it was before installnormalfilesmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installnormalfilesmatchfn will require n calls to
    restorematchfn to reverse.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
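
# The install/restore pair brackets a call to the wrapped command, as
# override_add() does below:
#   installnormalfilesmatchfn(repo[None].manifest())
#   result = orig(ui, repo, *pats, **opts)
#   restorematchfn()
# Installations nest, so each install must be balanced by exactly one
# restore.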

# -- Wrappers: modify existing commands --------------------------------

# Add works by going through the files that the user wanted to add and
# checking if they should be added as largefiles. Then it makes a new
# matcher which matches only the normal files and runs the original
# version of add.
def override_add(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if os.path.exists(repo.wjoin(lfutil.shortname)):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if exact or not exists:
            abovemin = (lfsize and
                        os.path.getsize(repo.wjoin(f)) >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in lfutil.repo_add(repo, standins)
                    if f in m.files()]
    finally:
        wlock.release()

    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()

    return (result == 1 or bad) and 1 or 0
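
# '(result == 1 or bad) and 1 or 0' is the pre-Python-2.5 spelling of a
# conditional expression: report failure (1) if the wrapped add failed or
# any standin could not be added, and success (0) otherwise.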

def override_remove(orig, ui, repo, *pats, **opts):
    manifest = repo[None].manifest()
    installnormalfilesmatchfn(manifest)
    orig(ui, repo, *pats, **opts)
    restorematchfn()

    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                os.unlink(repo.wjoin(f))
                currentdir = os.path.split(f)[0]
                while currentdir and not os.listdir(repo.wjoin(currentdir)):
                    os.rmdir(repo.wjoin(currentdir))
                    currentdir = os.path.split(currentdir)[0]
            lfdirstate.remove(f)
        lfdirstate.write()

        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        lfutil.repo_forget(repo, forget)
        lfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()

def override_status(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def override_log(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def override_verify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def override_update(orig, ui, repo, *pats, **opts):
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their
    # largefiles getting updated.
    wlock = repo.wlock()
    try:
        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)

# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))

        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                  'keep (l)ocal or take (o)ther?') %
                                lfutil.splitstandin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile so that the copy helper
# checks whether the destination largefile already exists. It also keeps
# a list of copied files so that the largefiles can be copied and the
# dirstate updated.
def override_copy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(os.path.relpath('.', repo.getcwd()),
                            lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        installnormalfilesmatchfn(repo[None].manifest())
        result = orig(ui, repo, pats, opts, rename)
    except util.Abort, e:
        if str(e) != 'no files to copy':
            raise e
        else:
            nonormalfiles = True
        result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        oldmatch = None # for the closure
        def override_match(repo, pats=[], opts={}, globbed=False,
                default='relpath'):
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(repo, newpats, opts, globbed, default)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fmap = set(m._files)
            orig_matchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                   lfile(lfutil.splitstandin(f)) and
                                   orig_matchfn(lfutil.splitstandin(f)) or
                                   None)
            return m
        oldmatch = installmatchfn(override_match)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def override_copyfile(src, dest):
                if lfutil.shortname in src and lfutil.shortname in dest:
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = override_copyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and lfutil.shortname in dest:
                srclfile = src.replace(lfutil.shortname, '')
                destlfile = dest.replace(lfutil.shortname, '')
                destlfiledir = os.path.dirname(destlfile) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(srclfile, destlfile)
                    lfdirstate.remove(os.path.relpath(srclfile,
                                                      repo.root))
                else:
                    util.copyfile(srclfile, destlfile)
                    lfdirstate.add(os.path.relpath(destlfile,
                                                   repo.root))
        lfdirstate.write()
    except util.Abort, e:
        if str(e) != 'no files to copy':
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result

# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state.
def override_revert(orig, ui, repo, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))

        try:
            ctx = repo[opts.get('rev')]
            oldmatch = None # for the closure
            def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
                    default='relpath'):
                if util.safehasattr(ctxorrepo, 'match'):
                    ctx0 = ctxorrepo
                else:
                    ctx0 = ctxorrepo[None]
                match = oldmatch(ctxorrepo, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if orig_matchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return orig_matchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(override_match)
            matches = override_match(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
            lfileslist = getattr(repo, '_lfilestoupdate', [])
            lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                    printmessage=False)

            # empty out the largefiles list so we start fresh next time
            repo._lfilestoupdate = []
            for lfile in modified:
                if lfile in lfileslist:
                    if os.path.exists(repo.wjoin(lfutil.standin(lfile))) \
                            and lfile in repo['.']:
                        lfutil.writestandin(repo, lfutil.standin(lfile),
                            repo['.'][lfile].data().strip(),
                            'x' in repo['.'][lfile].flags())
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for lfile in added:
                standin = lfutil.standin(lfile)
                if standin not in ctx and (standin in matches or
                                           opts.get('all')):
                    if lfile in lfdirstate:
                        lfdirstate.drop(lfile)
                    util.unlinkpath(repo.wjoin(standin))
            lfdirstate.write()
    finally:
        wlock.release()

def hg_update(orig, repo, node):
    result = orig(repo, node)
    # XXX check if it worked first
    lfcommands.updatelfiles(repo.ui, repo)
    return result

def hg_clean(orig, repo, node, show_stats=True):
    result = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return result

def hg_merge(orig, repo, node, force=None, remind=True):
    result = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return result

# When we rebase a repository with remotely changed largefiles, we need
# to take some extra care so that the largefiles are correctly updated
# in the working copy.
def override_pull(orig, ui, repo, source=None, **opts):
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = 'default'
        result = orig(ui, repo, source, **opts)
    return result

def override_rebase(orig, ui, repo, **opts):
    repo._isrebasing = True
    try:
        orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False

def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            f = lfutil.splitstandin(f)

            def getdatafn():
                # guard fd: open() may fail before fd is ever bound
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            try:
                sub.archive(repo.ui, archiver, prefix)
            except TypeError:
                sub.archive(archiver, prefix)

    archiver.done()

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def override_bailifchanged(orig, repo):
    orig(repo)
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))

# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

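# override_forget lets the original forget handle normal files through a
# temporary matcher, then processes the matching largefiles itself: each one
# is dropped from (or marked removed in) the largefiles dirstate and its
# standin is deleted from the repository.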
def override_forget(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
                           unlink=True)
    finally:
        wlock.release()

def getoutgoinglfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return toupload

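# Note the two distinct "nothing to do" results: getoutgoinglfiles returns
# None when no remote repository is reachable, but an empty set when there is
# simply nothing to upload. The callers below rely on that distinction for
# their status messages.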
def override_outgoing(orig, ui, repo, dest=None, **opts):
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in toupload:
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')

def override_summary(orig, ui, repo, *pats, **opts):
    orig(ui, repo, *pats, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, None, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles: %d to upload\n') % len(toupload))

def override_addremove(orig, ui, repo, *pats, **opts):
    # Check if the parent or child has largefiles; if so, disallow
    # addremove. If there is a symlink in the manifest then getting
    # the manifest throws an exception: catch it and let addremove
    # deal with it.
    try:
        manifesttip = set(repo['tip'].manifest())
    except util.Abort:
        manifesttip = set()
    try:
        manifestworking = set(repo[None].manifest())
    except util.Abort:
        manifestworking = set()

    # Manifests are only iterable so turn them into sets then union
    for file in manifesttip.union(manifestworking):
        if file.startswith(lfutil.shortname):
            raise util.Abort(
                _('addremove cannot be run on a repo with largefiles'))

    return orig(ui, repo, *pats, **opts)

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def override_purge(orig, ui, repo, *dirs, **opts):
    oldstatus = repo.status
    def override_status(node1='.', node2=None, match=None, ignored=False,
                        clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = override_status
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

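# After the underlying rollback, the standins in the working directory still
# reflect the undone commit, so they are forcibly updated from the new parent
# and the largefiles dirstate is rebuilt to match.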
def override_rollback(orig, ui, repo, **opts):
    result = orig(ui, repo, **opts)
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    lfiles = lfutil.listlfiles(repo)
    oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
    for file in lfiles:
        if file in oldlfiles:
            lfdirstate.normallookup(file)
        else:
            lfdirstate.add(file)
    lfdirstate.write()
    return result
@@ -1,160 +1,160 @@
# Copyright 2011 Fog Creek Software
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import os
import tempfile
import urllib2

from mercurial import error, httprepo, util, wireproto
from mercurial.i18n import _

import lfutil

LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
                           '\n\nPlease enable it in your Mercurial config '
                           'file.\n')

def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local cache and into the
    system cache.'''
    f = None
    proto.redirect()
    try:
        try:
            f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
            proto.getfile(f)
            f.seek(0)
            if sha != lfutil.hexsha1(f):
                return wireproto.pushres(1)
            lfutil.copytocacheabsolute(repo, f.name, sha)
        except IOError:
            repo.ui.warn(
                _('error: could not put received data into largefile store'))
            return wireproto.pushres(1)
    finally:
        if f:
            f.close()

    return wireproto.pushres(0)

def getlfile(repo, proto, sha):
    '''Retrieve a largefile from the repository-local cache or system
    cache.'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        raise util.Abort(_('requested largefile %s not present in cache') % sha)
    f = open(filename, 'rb')
    length = os.fstat(f.fileno())[6]

    # Since we can't set an HTTP content-length header here, and
    # Mercurial core provides no way to give the length of a streamres
    # (and reading the entire file into RAM would be ill-advised), we
    # just send the length on the first line of the response, like the
    # ssh proto does for string responses.
    def generator():
        yield '%d\n' % length
        for chunk in f:
            yield chunk
    return wireproto.streamres(generator())

def statlfile(repo, proto, sha):
    '''Return '2\n' if the largefile is missing, '1\n' if it has a
    mismatched checksum, or '0\n' if it is in good condition'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        return '2\n'
    fd = None
    try:
        fd = open(filename, 'rb')
        return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
    finally:
        if fd:
            fd.close()

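# wirereposetup grafts client-side counterparts of the three wire commands
# above onto the remote repository proxy; HTTP and ssh transports need
# different call paths, as the comments inside putlfile explain.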
def wirereposetup(ui, repo):
    class lfileswirerepository(repo.__class__):
        def putlfile(self, sha, fd):
            # unfortunately, httprepository._callpush tries to convert its
            # input file-like into a bundle before sending it, so we can't use
            # it ...
            if issubclass(self.__class__, httprepo.httprepository):
                try:
                    return int(self._call('putlfile', data=fd, sha=sha,
                        headers={'content-type':'application/mercurial-0.1'}))
                except (ValueError, urllib2.HTTPError):
                    return 1
            # ... but we can't use sshrepository._call because the data=
            # argument won't get sent, and _callpush does exactly what we want
            # in this case: send the data straight through
            else:
                try:
                    ret, output = self._callpush("putlfile", fd, sha=sha)
                    if ret == "":
                        raise error.ResponseError(_('putlfile failed:'),
                                                  output)
                    return int(ret)
                except IOError:
                    return 1
                except ValueError:
                    raise error.ResponseError(
                        _('putlfile failed (unexpected response):'), ret)

        def getlfile(self, sha):
            stream = self._callstream("getlfile", sha=sha)
            length = stream.readline()
            try:
                length = int(length)
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"),
                                                length))
            return (length, stream)

        def statlfile(self, sha):
            try:
                return int(self._call("statlfile", sha=sha))
            except (ValueError, urllib2.HTTPError):
                # If the server returns anything but an integer followed by a
                # newline, it's not speaking our language; if we get an HTTP
                # error, we can't be sure the largefile is present; either
                # way, consider it missing.
                return 2

    repo.__class__ = lfileswirerepository

# advertise the largefiles=serve capability
def capabilities(repo, proto):
    return capabilities_orig(repo, proto) + ' largefiles=serve'

# duplicate what Mercurial's new out-of-band errors mechanism does, because
# clients old and new alike both handle it well
def webproto_refuseclient(self, message):
    self.req.header([('Content-Type', 'application/hg-error')])
    return message

def sshproto_refuseclient(self, message):
    self.ui.write_err('%s\n-\n' % message)
    self.fout.write('\n')
    self.fout.flush()

    return ''

def heads(repo, proto):
    if lfutil.islfilesrepo(repo):
        return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
    return wireproto.heads(repo, proto)

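# Largefiles-aware clients rewrite 'heads' (including inside a 'batch'
# request) to 'lheads' when the server advertises largefiles support, since
# the overridden heads() above refuses clients without the extension;
# 'lheads' is presumably registered elsewhere in the extension as the
# unwrapped heads command.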
def sshrepo_callstream(self, cmd, **args):
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return ssh_oldcallstream(self, cmd, **args)

def httprepo_callstream(self, cmd, **args):
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return http_oldcallstream(self, cmd, **args)
@@ -1,412 +1,416 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''setup for largefiles repositories: reposetup'''
import copy
import types
import os
import re

from mercurial import context, error, manifest, match as match_, \
    node, util
from mercurial.i18n import _

import lfcommands
import proto
import lfutil

def reposetup(ui, repo):
    # wire repositories should be given new wireproto functions but not the
    # other largefiles modifications
    if not repo.local():
        return proto.wirereposetup(ui, repo)

    for name in ('status', 'commitctx', 'commit', 'push'):
        method = getattr(repo, name)
        #if not (isinstance(method, types.MethodType) and
        #        method.im_func is repo.__class__.commitctx.im_func):
        if (isinstance(method, types.FunctionType) and
            method.func_name == 'wrap'):
            ui.warn(_('largefiles: repo method %r appears to have already been'
                      ' wrapped by another extension: '
                      'largefiles may behave incorrectly\n')
                    % name)

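    # The repository object is subclassed dynamically, so the largefile-aware
    # behaviour layers on whatever class the repo already has and composes
    # with other extensions that use the same pattern.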
    class lfiles_repo(repo.__class__):
        lfstatus = False
        def status_nolfiles(self, *args, **kwargs):
            return super(lfiles_repo, self).status(*args, **kwargs)

        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            ctx = super(lfiles_repo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfiles_manifestdict(manifest.manifestdict):
                    def __contains__(self, filename):
                        if super(lfiles_manifestdict,
                                 self).__contains__(filename):
                            return True
                        return super(lfiles_manifestdict,
                            self).__contains__(lfutil.shortname+'/' + filename)
                class lfiles_ctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfiles_ctx, self).files()
                        return [re.sub('^\\'+lfutil.shortname+'/', '',
                                       filename) for filename in filenames]
                    def manifest(self):
                        man1 = super(lfiles_ctx, self).manifest()
                        man1.__class__ = lfiles_manifestdict
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        try:
                            result = super(lfiles_ctx, self).filectx(path,
                                fileid, filelog)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            result = super(lfiles_ctx, self).filectx(
                                lfutil.shortname + '/' + path, fileid,
                                filelog)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfiles_ctx
            return ctx

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                try:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown, listsubrepos)
                except TypeError:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandin(file):
                    if inctx(lfutil.standin(file), ctx2):
                        return lfutil.standin(file)
                    return file

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # get ignored, clean, and unknown but remove them
                # later if they were not asked for
                try:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True, listsubrepos)
                except TypeError:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True)
                if working:
                    # hold the wlock while we read largefiles and
                    # update the lfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        lfdirstate = lfutil.openlfdirstate(ui, self)
                        match._files = [f for f in match._files if f in
                                        lfdirstate]
                        s = lfdirstate.status(match, [], listignored,
                                              listclean, listunknown)
                        (unsure, modified, added, removed, missing, unknown,
                         ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                if ctx1[lfutil.standin(lfile)].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                            lfdirstate.write()
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        wlock.release()

                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)
                    # Handle unknown and ignored differently
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = list(result)
                    # Unknown files
                    result[4] = [f for f in unknown
                                 if (repo.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files must be ignored by both the dirstate and
                    # lfdirstate
                    result[5] = set(ignored).intersection(set(result[5]))
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result

        # As part of committing, copy all of the largefiles into the
        # cache.
        def commitctx(self, *args, **kwargs):
            node = super(lfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[node]
            for filename in ctx.files():
                if lfutil.isstandin(filename) and filename in ctx.manifest():
                    realfile = lfutil.splitstandin(filename)
                    lfutil.copytocache(self, ctx.node(), realfile)

            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            orig = super(lfiles_repo, self).commit

            wlock = repo.wlock()
            try:
                if getattr(repo, "_isrebasing", False):
                    # We have to take the time to pull down the new
                    # largefiles now. Otherwise if we are rebasing,
                    # any largefiles that were modified in the
                    # destination changesets get overwritten, either
                    # by the rebase or in the first commit after the
                    # rebase.
                    lfcommands.updatelfiles(repo.ui, repo)
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(repo.root, repo.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    modifiedfiles = []
                    for i in s:
                        modifiedfiles.extend(i)
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)
                    for lfile in lfdirstate:
                        if lfile in modifiedfiles:
                            if not os.path.exists(
                                    repo.wjoin(lfutil.standin(lfile))):
                                lfdirstate.drop(lfile)
                    lfdirstate.write()

                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                for f in match.files():
                    if lfutil.isstandin(f):
                        raise util.Abort(
                            _('file "%s" is a largefile standin') % f,
                            hint=('commit the largefile itself instead'))

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = lfutil.dirstate_walk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                # Refresh all matching big files. It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed. No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins. Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)
                lfdirstate.write()

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user. Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                lfiles = lfutil.listlfiles(repo)
                match = copy.copy(match)
                orig_matchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    # prevalidate matching standin directories
                    if lfutil.any_(st for st in match._files
                                   if st.startswith(fstandin)):
                        continue
                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if orig_matchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                return orig(text=text, user=user, date=date, match=match,
                            force=force, editor=editor, extra=extra)
            finally:
                wlock.release()

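        # push first collects, from every outgoing changeset, the largefile
        # hashes recorded in standins (including files only touched by
        # merges) and uploads those binaries with lfcommands.uploadlfiles
        # before delegating to the normal push.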
        def push(self, remote, force=False, revs=None, newbranch=False):
            o = lfutil.findoutgoing(repo, remote, force)
            if o:
                toupload = set()
                o = repo.changelog.nodesbetween(o, revs)[0]
                for n in o:
                    parents = [p for p in repo.changelog.parents(n)
                               if p != node.nullid]
                    ctx = repo[n]
                    files = set(ctx.files())
                    if len(parents) == 2:
                        mc = ctx.manifest()
                        mp1 = ctx.parents()[0].manifest()
                        mp2 = ctx.parents()[1].manifest()
                        for f in mp1:
                            if f not in mc:
                                files.add(f)
                        for f in mp2:
                            if f not in mc:
                                files.add(f)
                        for f in mc:
                            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
                                    None):
                                files.add(f)

                    toupload = toupload.union(
                        set([ctx[f].data().strip()
                             for f in files
                             if lfutil.isstandin(f) and f in ctx]))
                lfcommands.uploadlfiles(ui, self, remote, toupload)
            return super(lfiles_repo, self).push(remote, force, revs,
                                                 newbranch)

    repo.__class__ = lfiles_repo

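    # checkrequireslfiles records the 'largefiles' requirement in
    # .hg/requires as soon as any largefile standin appears in the store; the
    # hooks registered below re-run the check after every commit and
    # changegroup.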
    def checkrequireslfiles(ui, repo, **kwargs):
        if 'largefiles' not in repo.requirements and lfutil.any_(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            # workaround bug in Mercurial 1.9 whereby requirements is
            # a list on newly-cloned repos
            repo.requirements = set(repo.requirements)

            repo.requirements |= set(['largefiles'])
            repo._writerequirements()

    checkrequireslfiles(ui, repo)

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)