##// END OF EJS Templates
largefiles: remove use of underscores that breaks coding convention
Na'Tosha Bard -
r16247:d87d9d8a default
parent child Browse files
Show More
@@ -1,195 +1,195 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''base class for store implementations and store-related utility code'''
9 '''base class for store implementations and store-related utility code'''
10
10
11 import binascii
11 import binascii
12 import re
12 import re
13
13
14 from mercurial import util, node, hg
14 from mercurial import util, node, hg
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18
18
19 class StoreError(Exception):
19 class StoreError(Exception):
20 '''Raised when there is a problem getting files from or putting
20 '''Raised when there is a problem getting files from or putting
21 files to a central store.'''
21 files to a central store.'''
22 def __init__(self, filename, hash, url, detail):
22 def __init__(self, filename, hash, url, detail):
23 self.filename = filename
23 self.filename = filename
24 self.hash = hash
24 self.hash = hash
25 self.url = url
25 self.url = url
26 self.detail = detail
26 self.detail = detail
27
27
28 def longmessage(self):
28 def longmessage(self):
29 if self.url:
29 if self.url:
30 return ('%s: %s\n'
30 return ('%s: %s\n'
31 '(failed URL: %s)\n'
31 '(failed URL: %s)\n'
32 % (self.filename, self.detail, self.url))
32 % (self.filename, self.detail, self.url))
33 else:
33 else:
34 return ('%s: %s\n'
34 return ('%s: %s\n'
35 '(no default or default-push path set in hgrc)\n'
35 '(no default or default-push path set in hgrc)\n'
36 % (self.filename, self.detail))
36 % (self.filename, self.detail))
37
37
38 def __str__(self):
38 def __str__(self):
39 return "%s: %s" % (self.url, self.detail)
39 return "%s: %s" % (self.url, self.detail)
40
40
41 class basestore(object):
41 class basestore(object):
42 def __init__(self, ui, repo, url):
42 def __init__(self, ui, repo, url):
43 self.ui = ui
43 self.ui = ui
44 self.repo = repo
44 self.repo = repo
45 self.url = url
45 self.url = url
46
46
47 def put(self, source, hash):
47 def put(self, source, hash):
48 '''Put source file into the store under <filename>/<hash>.'''
48 '''Put source file into the store under <filename>/<hash>.'''
49 raise NotImplementedError('abstract method')
49 raise NotImplementedError('abstract method')
50
50
51 def exists(self, hash):
51 def exists(self, hash):
52 '''Check to see if the store contains the given hash.'''
52 '''Check to see if the store contains the given hash.'''
53 raise NotImplementedError('abstract method')
53 raise NotImplementedError('abstract method')
54
54
55 def get(self, files):
55 def get(self, files):
56 '''Get the specified largefiles from the store and write to local
56 '''Get the specified largefiles from the store and write to local
57 files under repo.root. files is a list of (filename, hash)
57 files under repo.root. files is a list of (filename, hash)
58 tuples. Return (success, missing), lists of files successfuly
58 tuples. Return (success, missing), lists of files successfuly
59 downloaded and those not found in the store. success is a list
59 downloaded and those not found in the store. success is a list
60 of (filename, hash) tuples; missing is a list of filenames that
60 of (filename, hash) tuples; missing is a list of filenames that
61 we could not get. (The detailed error message will already have
61 we could not get. (The detailed error message will already have
62 been presented to the user, so missing is just supplied as a
62 been presented to the user, so missing is just supplied as a
63 summary.)'''
63 summary.)'''
64 success = []
64 success = []
65 missing = []
65 missing = []
66 ui = self.ui
66 ui = self.ui
67
67
68 at = 0
68 at = 0
69 for filename, hash in files:
69 for filename, hash in files:
70 ui.progress(_('getting largefiles'), at, unit='lfile',
70 ui.progress(_('getting largefiles'), at, unit='lfile',
71 total=len(files))
71 total=len(files))
72 at += 1
72 at += 1
73 ui.note(_('getting %s:%s\n') % (filename, hash))
73 ui.note(_('getting %s:%s\n') % (filename, hash))
74
74
75 storefilename = lfutil.storepath(self.repo, hash)
75 storefilename = lfutil.storepath(self.repo, hash)
76 tmpfile = util.atomictempfile(storefilename,
76 tmpfile = util.atomictempfile(storefilename,
77 createmode=self.repo.store.createmode)
77 createmode=self.repo.store.createmode)
78
78
79 try:
79 try:
80 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
80 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
81 except StoreError, err:
81 except StoreError, err:
82 ui.warn(err.longmessage())
82 ui.warn(err.longmessage())
83 hhash = ""
83 hhash = ""
84
84
85 if hhash != hash:
85 if hhash != hash:
86 if hhash != "":
86 if hhash != "":
87 ui.warn(_('%s: data corruption (expected %s, got %s)\n')
87 ui.warn(_('%s: data corruption (expected %s, got %s)\n')
88 % (filename, hash, hhash))
88 % (filename, hash, hhash))
89 tmpfile.discard() # no-op if it's already closed
89 tmpfile.discard() # no-op if it's already closed
90 missing.append(filename)
90 missing.append(filename)
91 continue
91 continue
92
92
93 tmpfile.close()
93 tmpfile.close()
94 lfutil.linktousercache(self.repo, hash)
94 lfutil.linktousercache(self.repo, hash)
95 success.append((filename, hhash))
95 success.append((filename, hhash))
96
96
97 ui.progress(_('getting largefiles'), None)
97 ui.progress(_('getting largefiles'), None)
98 return (success, missing)
98 return (success, missing)
99
99
100 def verify(self, revs, contents=False):
100 def verify(self, revs, contents=False):
101 '''Verify the existence (and, optionally, contents) of every big
101 '''Verify the existence (and, optionally, contents) of every big
102 file revision referenced by every changeset in revs.
102 file revision referenced by every changeset in revs.
103 Return 0 if all is well, non-zero on any errors.'''
103 Return 0 if all is well, non-zero on any errors.'''
104 write = self.ui.write
104 write = self.ui.write
105 failed = False
105 failed = False
106
106
107 write(_('searching %d changesets for largefiles\n') % len(revs))
107 write(_('searching %d changesets for largefiles\n') % len(revs))
108 verified = set() # set of (filename, filenode) tuples
108 verified = set() # set of (filename, filenode) tuples
109
109
110 for rev in revs:
110 for rev in revs:
111 cctx = self.repo[rev]
111 cctx = self.repo[rev]
112 cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
112 cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
113
113
114 failed = util.any(self._verifyfile(
114 failed = util.any(self._verifyfile(
115 cctx, cset, contents, standin, verified) for standin in cctx)
115 cctx, cset, contents, standin, verified) for standin in cctx)
116
116
117 num_revs = len(verified)
117 numrevs = len(verified)
118 num_lfiles = len(set([fname for (fname, fnode) in verified]))
118 numlfiles = len(set([fname for (fname, fnode) in verified]))
119 if contents:
119 if contents:
120 write(_('verified contents of %d revisions of %d largefiles\n')
120 write(_('verified contents of %d revisions of %d largefiles\n')
121 % (num_revs, num_lfiles))
121 % (numrevs, numlfiles))
122 else:
122 else:
123 write(_('verified existence of %d revisions of %d largefiles\n')
123 write(_('verified existence of %d revisions of %d largefiles\n')
124 % (num_revs, num_lfiles))
124 % (numrevs, numlfiles))
125
125
126 return int(failed)
126 return int(failed)
127
127
128 def _getfile(self, tmpfile, filename, hash):
128 def _getfile(self, tmpfile, filename, hash):
129 '''Fetch one revision of one file from the store and write it
129 '''Fetch one revision of one file from the store and write it
130 to tmpfile. Compute the hash of the file on-the-fly as it
130 to tmpfile. Compute the hash of the file on-the-fly as it
131 downloads and return the binary hash. Close tmpfile. Raise
131 downloads and return the binary hash. Close tmpfile. Raise
132 StoreError if unable to download the file (e.g. it does not
132 StoreError if unable to download the file (e.g. it does not
133 exist in the store).'''
133 exist in the store).'''
134 raise NotImplementedError('abstract method')
134 raise NotImplementedError('abstract method')
135
135
136 def _verifyfile(self, cctx, cset, contents, standin, verified):
136 def _verifyfile(self, cctx, cset, contents, standin, verified):
137 '''Perform the actual verification of a file in the store.
137 '''Perform the actual verification of a file in the store.
138 '''
138 '''
139 raise NotImplementedError('abstract method')
139 raise NotImplementedError('abstract method')
140
140
141 import localstore, wirestore
141 import localstore, wirestore
142
142
143 _storeprovider = {
143 _storeprovider = {
144 'file': [localstore.localstore],
144 'file': [localstore.localstore],
145 'http': [wirestore.wirestore],
145 'http': [wirestore.wirestore],
146 'https': [wirestore.wirestore],
146 'https': [wirestore.wirestore],
147 'ssh': [wirestore.wirestore],
147 'ssh': [wirestore.wirestore],
148 }
148 }
149
149
150 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
150 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
151
151
152 # During clone this function is passed the src's ui object
152 # During clone this function is passed the src's ui object
153 # but it needs the dest's ui object so it can read out of
153 # but it needs the dest's ui object so it can read out of
154 # the config file. Use repo.ui instead.
154 # the config file. Use repo.ui instead.
155 def _openstore(repo, remote=None, put=False):
155 def _openstore(repo, remote=None, put=False):
156 ui = repo.ui
156 ui = repo.ui
157
157
158 if not remote:
158 if not remote:
159 lfpullsource = getattr(repo, 'lfpullsource', None)
159 lfpullsource = getattr(repo, 'lfpullsource', None)
160 if lfpullsource:
160 if lfpullsource:
161 path = ui.expandpath(lfpullsource)
161 path = ui.expandpath(lfpullsource)
162 else:
162 else:
163 path = ui.expandpath('default-push', 'default')
163 path = ui.expandpath('default-push', 'default')
164
164
165 # ui.expandpath() leaves 'default-push' and 'default' alone if
165 # ui.expandpath() leaves 'default-push' and 'default' alone if
166 # they cannot be expanded: fallback to the empty string,
166 # they cannot be expanded: fallback to the empty string,
167 # meaning the current directory.
167 # meaning the current directory.
168 if path == 'default-push' or path == 'default':
168 if path == 'default-push' or path == 'default':
169 path = ''
169 path = ''
170 remote = repo
170 remote = repo
171 else:
171 else:
172 remote = hg.peer(repo, {}, path)
172 remote = hg.peer(repo, {}, path)
173
173
174 # The path could be a scheme so use Mercurial's normal functionality
174 # The path could be a scheme so use Mercurial's normal functionality
175 # to resolve the scheme to a repository and use its path
175 # to resolve the scheme to a repository and use its path
176 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
176 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
177
177
178 match = _scheme_re.match(path)
178 match = _scheme_re.match(path)
179 if not match: # regular filesystem path
179 if not match: # regular filesystem path
180 scheme = 'file'
180 scheme = 'file'
181 else:
181 else:
182 scheme = match.group(1)
182 scheme = match.group(1)
183
183
184 try:
184 try:
185 storeproviders = _storeprovider[scheme]
185 storeproviders = _storeprovider[scheme]
186 except KeyError:
186 except KeyError:
187 raise util.Abort(_('unsupported URL scheme %r') % scheme)
187 raise util.Abort(_('unsupported URL scheme %r') % scheme)
188
188
189 for class_obj in storeproviders:
189 for classobj in storeproviders:
190 try:
190 try:
191 return class_obj(ui, repo, remote)
191 return classobj(ui, repo, remote)
192 except lfutil.storeprotonotcapable:
192 except lfutil.storeprotonotcapable:
193 pass
193 pass
194
194
195 raise util.Abort(_('%s does not appear to be a largefile store') % path)
195 raise util.Abort(_('%s does not appear to be a largefile store') % path)
@@ -1,500 +1,500 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''High-level command function for lfconvert, plus the cmdtable.'''
9 '''High-level command function for lfconvert, plus the cmdtable.'''
10
10
11 import os
11 import os
12 import shutil
12 import shutil
13
13
14 from mercurial import util, match as match_, hg, node, context, error
14 from mercurial import util, match as match_, hg, node, context, error
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 import lfutil
17 import lfutil
18 import basestore
18 import basestore
19
19
20 # -- Commands ----------------------------------------------------------
20 # -- Commands ----------------------------------------------------------
21
21
22 def lfconvert(ui, src, dest, *pats, **opts):
22 def lfconvert(ui, src, dest, *pats, **opts):
23 '''convert a normal repository to a largefiles repository
23 '''convert a normal repository to a largefiles repository
24
24
25 Convert repository SOURCE to a new repository DEST, identical to
25 Convert repository SOURCE to a new repository DEST, identical to
26 SOURCE except that certain files will be converted as largefiles:
26 SOURCE except that certain files will be converted as largefiles:
27 specifically, any file that matches any PATTERN *or* whose size is
27 specifically, any file that matches any PATTERN *or* whose size is
28 above the minimum size threshold is converted as a largefile. The
28 above the minimum size threshold is converted as a largefile. The
29 size used to determine whether or not to track a file as a
29 size used to determine whether or not to track a file as a
30 largefile is the size of the first version of the file. The
30 largefile is the size of the first version of the file. The
31 minimum size can be specified either with --size or in
31 minimum size can be specified either with --size or in
32 configuration as ``largefiles.size``.
32 configuration as ``largefiles.size``.
33
33
34 After running this command you will need to make sure that
34 After running this command you will need to make sure that
35 largefiles is enabled anywhere you intend to push the new
35 largefiles is enabled anywhere you intend to push the new
36 repository.
36 repository.
37
37
38 Use --to-normal to convert largefiles back to normal files; after
38 Use --to-normal to convert largefiles back to normal files; after
39 this, the DEST repository can be used without largefiles at all.'''
39 this, the DEST repository can be used without largefiles at all.'''
40
40
41 if opts['to_normal']:
41 if opts['to_normal']:
42 tolfile = False
42 tolfile = False
43 else:
43 else:
44 tolfile = True
44 tolfile = True
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
45 size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
46
46
47 if not hg.islocal(src):
47 if not hg.islocal(src):
48 raise util.Abort(_('%s is not a local Mercurial repo') % src)
48 raise util.Abort(_('%s is not a local Mercurial repo') % src)
49 if not hg.islocal(dest):
49 if not hg.islocal(dest):
50 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
50 raise util.Abort(_('%s is not a local Mercurial repo') % dest)
51
51
52 rsrc = hg.repository(ui, src)
52 rsrc = hg.repository(ui, src)
53 ui.status(_('initializing destination %s\n') % dest)
53 ui.status(_('initializing destination %s\n') % dest)
54 rdst = hg.repository(ui, dest, create=True)
54 rdst = hg.repository(ui, dest, create=True)
55
55
56 success = False
56 success = False
57 try:
57 try:
58 # Lock destination to prevent modification while it is converted to.
58 # Lock destination to prevent modification while it is converted to.
59 # Don't need to lock src because we are just reading from its history
59 # Don't need to lock src because we are just reading from its history
60 # which can't change.
60 # which can't change.
61 dst_lock = rdst.lock()
61 dstlock = rdst.lock()
62
62
63 # Get a list of all changesets in the source. The easy way to do this
63 # Get a list of all changesets in the source. The easy way to do this
64 # is to simply walk the changelog, using changelog.nodesbewteen().
64 # is to simply walk the changelog, using changelog.nodesbewteen().
65 # Take a look at mercurial/revlog.py:639 for more details.
65 # Take a look at mercurial/revlog.py:639 for more details.
66 # Use a generator instead of a list to decrease memory usage
66 # Use a generator instead of a list to decrease memory usage
67 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
67 ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
68 rsrc.heads())[0])
68 rsrc.heads())[0])
69 revmap = {node.nullid: node.nullid}
69 revmap = {node.nullid: node.nullid}
70 if tolfile:
70 if tolfile:
71 lfiles = set()
71 lfiles = set()
72 normalfiles = set()
72 normalfiles = set()
73 if not pats:
73 if not pats:
74 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
74 pats = ui.configlist(lfutil.longname, 'patterns', default=[])
75 if pats:
75 if pats:
76 matcher = match_.match(rsrc.root, '', list(pats))
76 matcher = match_.match(rsrc.root, '', list(pats))
77 else:
77 else:
78 matcher = None
78 matcher = None
79
79
80 lfiletohash = {}
80 lfiletohash = {}
81 for ctx in ctxs:
81 for ctx in ctxs:
82 ui.progress(_('converting revisions'), ctx.rev(),
82 ui.progress(_('converting revisions'), ctx.rev(),
83 unit=_('revision'), total=rsrc['tip'].rev())
83 unit=_('revision'), total=rsrc['tip'].rev())
84 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
84 _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
85 lfiles, normalfiles, matcher, size, lfiletohash)
85 lfiles, normalfiles, matcher, size, lfiletohash)
86 ui.progress(_('converting revisions'), None)
86 ui.progress(_('converting revisions'), None)
87
87
88 if os.path.exists(rdst.wjoin(lfutil.shortname)):
88 if os.path.exists(rdst.wjoin(lfutil.shortname)):
89 shutil.rmtree(rdst.wjoin(lfutil.shortname))
89 shutil.rmtree(rdst.wjoin(lfutil.shortname))
90
90
91 for f in lfiletohash.keys():
91 for f in lfiletohash.keys():
92 if os.path.isfile(rdst.wjoin(f)):
92 if os.path.isfile(rdst.wjoin(f)):
93 os.unlink(rdst.wjoin(f))
93 os.unlink(rdst.wjoin(f))
94 try:
94 try:
95 os.removedirs(os.path.dirname(rdst.wjoin(f)))
95 os.removedirs(os.path.dirname(rdst.wjoin(f)))
96 except OSError:
96 except OSError:
97 pass
97 pass
98
98
99 # If there were any files converted to largefiles, add largefiles
99 # If there were any files converted to largefiles, add largefiles
100 # to the destination repository's requirements.
100 # to the destination repository's requirements.
101 if lfiles:
101 if lfiles:
102 rdst.requirements.add('largefiles')
102 rdst.requirements.add('largefiles')
103 rdst._writerequirements()
103 rdst._writerequirements()
104 else:
104 else:
105 for ctx in ctxs:
105 for ctx in ctxs:
106 ui.progress(_('converting revisions'), ctx.rev(),
106 ui.progress(_('converting revisions'), ctx.rev(),
107 unit=_('revision'), total=rsrc['tip'].rev())
107 unit=_('revision'), total=rsrc['tip'].rev())
108 _addchangeset(ui, rsrc, rdst, ctx, revmap)
108 _addchangeset(ui, rsrc, rdst, ctx, revmap)
109
109
110 ui.progress(_('converting revisions'), None)
110 ui.progress(_('converting revisions'), None)
111 success = True
111 success = True
112 finally:
112 finally:
113 if not success:
113 if not success:
114 # we failed, remove the new directory
114 # we failed, remove the new directory
115 shutil.rmtree(rdst.root)
115 shutil.rmtree(rdst.root)
116 dst_lock.release()
116 dstlock.release()
117
117
118 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
118 def _addchangeset(ui, rsrc, rdst, ctx, revmap):
119 # Convert src parents to dst parents
119 # Convert src parents to dst parents
120 parents = _convertparents(ctx, revmap)
120 parents = _convertparents(ctx, revmap)
121
121
122 # Generate list of changed files
122 # Generate list of changed files
123 files = _getchangedfiles(ctx, parents)
123 files = _getchangedfiles(ctx, parents)
124
124
125 def getfilectx(repo, memctx, f):
125 def getfilectx(repo, memctx, f):
126 if lfutil.standin(f) in files:
126 if lfutil.standin(f) in files:
127 # if the file isn't in the manifest then it was removed
127 # if the file isn't in the manifest then it was removed
128 # or renamed, raise IOError to indicate this
128 # or renamed, raise IOError to indicate this
129 try:
129 try:
130 fctx = ctx.filectx(lfutil.standin(f))
130 fctx = ctx.filectx(lfutil.standin(f))
131 except error.LookupError:
131 except error.LookupError:
132 raise IOError()
132 raise IOError()
133 renamed = fctx.renamed()
133 renamed = fctx.renamed()
134 if renamed:
134 if renamed:
135 renamed = lfutil.splitstandin(renamed[0])
135 renamed = lfutil.splitstandin(renamed[0])
136
136
137 hash = fctx.data().strip()
137 hash = fctx.data().strip()
138 path = lfutil.findfile(rsrc, hash)
138 path = lfutil.findfile(rsrc, hash)
139 ### TODO: What if the file is not cached?
139 ### TODO: What if the file is not cached?
140 data = ''
140 data = ''
141 fd = None
141 fd = None
142 try:
142 try:
143 fd = open(path, 'rb')
143 fd = open(path, 'rb')
144 data = fd.read()
144 data = fd.read()
145 finally:
145 finally:
146 if fd:
146 if fd:
147 fd.close()
147 fd.close()
148 return context.memfilectx(f, data, 'l' in fctx.flags(),
148 return context.memfilectx(f, data, 'l' in fctx.flags(),
149 'x' in fctx.flags(), renamed)
149 'x' in fctx.flags(), renamed)
150 else:
150 else:
151 return _getnormalcontext(repo.ui, ctx, f, revmap)
151 return _getnormalcontext(repo.ui, ctx, f, revmap)
152
152
153 dstfiles = []
153 dstfiles = []
154 for file in files:
154 for file in files:
155 if lfutil.isstandin(file):
155 if lfutil.isstandin(file):
156 dstfiles.append(lfutil.splitstandin(file))
156 dstfiles.append(lfutil.splitstandin(file))
157 else:
157 else:
158 dstfiles.append(file)
158 dstfiles.append(file)
159 # Commit
159 # Commit
160 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
160 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
161
161
162 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
162 def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
163 matcher, size, lfiletohash):
163 matcher, size, lfiletohash):
164 # Convert src parents to dst parents
164 # Convert src parents to dst parents
165 parents = _convertparents(ctx, revmap)
165 parents = _convertparents(ctx, revmap)
166
166
167 # Generate list of changed files
167 # Generate list of changed files
168 files = _getchangedfiles(ctx, parents)
168 files = _getchangedfiles(ctx, parents)
169
169
170 dstfiles = []
170 dstfiles = []
171 for f in files:
171 for f in files:
172 if f not in lfiles and f not in normalfiles:
172 if f not in lfiles and f not in normalfiles:
173 islfile = _islfile(f, ctx, matcher, size)
173 islfile = _islfile(f, ctx, matcher, size)
174 # If this file was renamed or copied then copy
174 # If this file was renamed or copied then copy
175 # the lfileness of its predecessor
175 # the lfileness of its predecessor
176 if f in ctx.manifest():
176 if f in ctx.manifest():
177 fctx = ctx.filectx(f)
177 fctx = ctx.filectx(f)
178 renamed = fctx.renamed()
178 renamed = fctx.renamed()
179 renamedlfile = renamed and renamed[0] in lfiles
179 renamedlfile = renamed and renamed[0] in lfiles
180 islfile |= renamedlfile
180 islfile |= renamedlfile
181 if 'l' in fctx.flags():
181 if 'l' in fctx.flags():
182 if renamedlfile:
182 if renamedlfile:
183 raise util.Abort(
183 raise util.Abort(
184 _('renamed/copied largefile %s becomes symlink')
184 _('renamed/copied largefile %s becomes symlink')
185 % f)
185 % f)
186 islfile = False
186 islfile = False
187 if islfile:
187 if islfile:
188 lfiles.add(f)
188 lfiles.add(f)
189 else:
189 else:
190 normalfiles.add(f)
190 normalfiles.add(f)
191
191
192 if f in lfiles:
192 if f in lfiles:
193 dstfiles.append(lfutil.standin(f))
193 dstfiles.append(lfutil.standin(f))
194 # largefile in manifest if it has not been removed/renamed
194 # largefile in manifest if it has not been removed/renamed
195 if f in ctx.manifest():
195 if f in ctx.manifest():
196 fctx = ctx.filectx(f)
196 fctx = ctx.filectx(f)
197 if 'l' in fctx.flags():
197 if 'l' in fctx.flags():
198 renamed = fctx.renamed()
198 renamed = fctx.renamed()
199 if renamed and renamed[0] in lfiles:
199 if renamed and renamed[0] in lfiles:
200 raise util.Abort(_('largefile %s becomes symlink') % f)
200 raise util.Abort(_('largefile %s becomes symlink') % f)
201
201
202 # largefile was modified, update standins
202 # largefile was modified, update standins
203 fullpath = rdst.wjoin(f)
203 fullpath = rdst.wjoin(f)
204 util.makedirs(os.path.dirname(fullpath))
204 util.makedirs(os.path.dirname(fullpath))
205 m = util.sha1('')
205 m = util.sha1('')
206 m.update(ctx[f].data())
206 m.update(ctx[f].data())
207 hash = m.hexdigest()
207 hash = m.hexdigest()
208 if f not in lfiletohash or lfiletohash[f] != hash:
208 if f not in lfiletohash or lfiletohash[f] != hash:
209 try:
209 try:
210 fd = open(fullpath, 'wb')
210 fd = open(fullpath, 'wb')
211 fd.write(ctx[f].data())
211 fd.write(ctx[f].data())
212 finally:
212 finally:
213 if fd:
213 if fd:
214 fd.close()
214 fd.close()
215 executable = 'x' in ctx[f].flags()
215 executable = 'x' in ctx[f].flags()
216 os.chmod(fullpath, lfutil.getmode(executable))
216 os.chmod(fullpath, lfutil.getmode(executable))
217 lfutil.writestandin(rdst, lfutil.standin(f), hash,
217 lfutil.writestandin(rdst, lfutil.standin(f), hash,
218 executable)
218 executable)
219 lfiletohash[f] = hash
219 lfiletohash[f] = hash
220 else:
220 else:
221 # normal file
221 # normal file
222 dstfiles.append(f)
222 dstfiles.append(f)
223
223
224 def getfilectx(repo, memctx, f):
224 def getfilectx(repo, memctx, f):
225 if lfutil.isstandin(f):
225 if lfutil.isstandin(f):
226 # if the file isn't in the manifest then it was removed
226 # if the file isn't in the manifest then it was removed
227 # or renamed, raise IOError to indicate this
227 # or renamed, raise IOError to indicate this
228 srcfname = lfutil.splitstandin(f)
228 srcfname = lfutil.splitstandin(f)
229 try:
229 try:
230 fctx = ctx.filectx(srcfname)
230 fctx = ctx.filectx(srcfname)
231 except error.LookupError:
231 except error.LookupError:
232 raise IOError()
232 raise IOError()
233 renamed = fctx.renamed()
233 renamed = fctx.renamed()
234 if renamed:
234 if renamed:
235 # standin is always a largefile because largefile-ness
235 # standin is always a largefile because largefile-ness
236 # doesn't change after rename or copy
236 # doesn't change after rename or copy
237 renamed = lfutil.standin(renamed[0])
237 renamed = lfutil.standin(renamed[0])
238
238
239 return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
239 return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
240 fctx.flags(), 'x' in fctx.flags(), renamed)
240 fctx.flags(), 'x' in fctx.flags(), renamed)
241 else:
241 else:
242 return _getnormalcontext(repo.ui, ctx, f, revmap)
242 return _getnormalcontext(repo.ui, ctx, f, revmap)
243
243
244 # Commit
244 # Commit
245 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
245 _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
246
246
247 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
247 def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
248 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
248 mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
249 getfilectx, ctx.user(), ctx.date(), ctx.extra())
249 getfilectx, ctx.user(), ctx.date(), ctx.extra())
250 ret = rdst.commitctx(mctx)
250 ret = rdst.commitctx(mctx)
251 rdst.dirstate.setparents(ret)
251 rdst.dirstate.setparents(ret)
252 revmap[ctx.node()] = rdst.changelog.tip()
252 revmap[ctx.node()] = rdst.changelog.tip()
253
253
254 # Generate list of changed files
254 # Generate list of changed files
255 def _getchangedfiles(ctx, parents):
255 def _getchangedfiles(ctx, parents):
256 files = set(ctx.files())
256 files = set(ctx.files())
257 if node.nullid not in parents:
257 if node.nullid not in parents:
258 mc = ctx.manifest()
258 mc = ctx.manifest()
259 mp1 = ctx.parents()[0].manifest()
259 mp1 = ctx.parents()[0].manifest()
260 mp2 = ctx.parents()[1].manifest()
260 mp2 = ctx.parents()[1].manifest()
261 files |= (set(mp1) | set(mp2)) - set(mc)
261 files |= (set(mp1) | set(mp2)) - set(mc)
262 for f in mc:
262 for f in mc:
263 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
263 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
264 files.add(f)
264 files.add(f)
265 return files
265 return files
266
266
267 # Convert src parents to dst parents
267 # Convert src parents to dst parents
268 def _convertparents(ctx, revmap):
268 def _convertparents(ctx, revmap):
269 parents = []
269 parents = []
270 for p in ctx.parents():
270 for p in ctx.parents():
271 parents.append(revmap[p.node()])
271 parents.append(revmap[p.node()])
272 while len(parents) < 2:
272 while len(parents) < 2:
273 parents.append(node.nullid)
273 parents.append(node.nullid)
274 return parents
274 return parents
275
275
276 # Get memfilectx for a normal file
276 # Get memfilectx for a normal file
277 def _getnormalcontext(ui, ctx, f, revmap):
277 def _getnormalcontext(ui, ctx, f, revmap):
278 try:
278 try:
279 fctx = ctx.filectx(f)
279 fctx = ctx.filectx(f)
280 except error.LookupError:
280 except error.LookupError:
281 raise IOError()
281 raise IOError()
282 renamed = fctx.renamed()
282 renamed = fctx.renamed()
283 if renamed:
283 if renamed:
284 renamed = renamed[0]
284 renamed = renamed[0]
285
285
286 data = fctx.data()
286 data = fctx.data()
287 if f == '.hgtags':
287 if f == '.hgtags':
288 data = _converttags (ui, revmap, data)
288 data = _converttags (ui, revmap, data)
289 return context.memfilectx(f, data, 'l' in fctx.flags(),
289 return context.memfilectx(f, data, 'l' in fctx.flags(),
290 'x' in fctx.flags(), renamed)
290 'x' in fctx.flags(), renamed)
291
291
292 # Remap tag data using a revision map
292 # Remap tag data using a revision map
293 def _converttags(ui, revmap, data):
293 def _converttags(ui, revmap, data):
294 newdata = []
294 newdata = []
295 for line in data.splitlines():
295 for line in data.splitlines():
296 try:
296 try:
297 id, name = line.split(' ', 1)
297 id, name = line.split(' ', 1)
298 except ValueError:
298 except ValueError:
299 ui.warn(_('skipping incorrectly formatted tag %s\n'
299 ui.warn(_('skipping incorrectly formatted tag %s\n'
300 % line))
300 % line))
301 continue
301 continue
302 try:
302 try:
303 newid = node.bin(id)
303 newid = node.bin(id)
304 except TypeError:
304 except TypeError:
305 ui.warn(_('skipping incorrectly formatted id %s\n'
305 ui.warn(_('skipping incorrectly formatted id %s\n'
306 % id))
306 % id))
307 continue
307 continue
308 try:
308 try:
309 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
309 newdata.append('%s %s\n' % (node.hex(revmap[newid]),
310 name))
310 name))
311 except KeyError:
311 except KeyError:
312 ui.warn(_('no mapping for id %s\n') % id)
312 ui.warn(_('no mapping for id %s\n') % id)
313 continue
313 continue
314 return ''.join(newdata)
314 return ''.join(newdata)
315
315
316 def _islfile(file, ctx, matcher, size):
316 def _islfile(file, ctx, matcher, size):
317 '''Return true if file should be considered a largefile, i.e.
317 '''Return true if file should be considered a largefile, i.e.
318 matcher matches it or it is larger than size.'''
318 matcher matches it or it is larger than size.'''
319 # never store special .hg* files as largefiles
319 # never store special .hg* files as largefiles
320 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
320 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
321 return False
321 return False
322 if matcher and matcher(file):
322 if matcher and matcher(file):
323 return True
323 return True
324 try:
324 try:
325 return ctx.filectx(file).size() >= size * 1024 * 1024
325 return ctx.filectx(file).size() >= size * 1024 * 1024
326 except error.LookupError:
326 except error.LookupError:
327 return False
327 return False
328
328
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # only upload hashes the destination store does not already have
    missing = [h for h in files if not store.exists(h)]
    total = len(missing)
    for at, hash in enumerate(missing):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=total)
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
    ui.progress(_('uploading largefiles'), None)
350
350
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    # store.verify wants a real list, not a generic iterator; range()
    # returns one here
    revs = range(len(repo)) if all else ['.']
    store = basestore._openstore(repo)
    return store.verify(revs, contents=contents)
366
366
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    # NOTE: the 'node' parameter shadows the mercurial 'node' module
    # imported at file level; this function does not use that module.
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        # If we are mid-merge, then we have to trust the standin that is in the
        # working copy to have the correct hashvalue. This is because the
        # original hg.merge() already updated the standin as part of the normal
        # merge process -- we just have to update the largefile to match.
        if (getattr(repo, "_ismerging", False) and
            os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
            expectedhash = lfutil.readstandin(repo, lfile)
        else:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()

        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
            not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        # fetch everything that is missing in one store round-trip
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
402
402
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Update working-copy largefiles to match their standins.

    filelist, when given, restricts the update to those files.
    printmessage controls whether the status line and the final
    "updated/removed" summary are printed.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # largefiles tracked in the current revision plus any already
        # known to the largefiles dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        # make sure every needed largefile is in the local cache first
        cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
            # increment the appropriate counter according to _updatelfile's
            # return value (1 = updated, -1 = removed, 0/None = no change)
            updated += i > 0 and i or 0
            removed -= i < 0 and i or 0
        if printmessage and (removed or updated) and not printed:
            ui.status(_('getting changed largefiles\n'))
            printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
434
434
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin+'.orig'):
            # the standin has a .orig backup; keep a matching backup of
            # the largefile itself -- presumably for merge/revert
            # recovery (TODO confirm against callers)
            shutil.copyfile(abslfile, abslfile+'.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            ret = 1
        # propagate the standin's permission bits to the largefile
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(abslfile) and lfile not in repo[None]:
            util.unlinkpath(abslfile)
            ret = -1
    # mirror the standin's dirstate entry into the largefiles dirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted. But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
487
487
# -- hg commands declarations ------------------------------------------------

# command table: name -> (callback, options, synopsis); each option is
# (short flag, long flag, default, help text[, value placeholder])
cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                  ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
}
@@ -1,467 +1,467 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk dirstate with matcher; unknown/ignored toggle inclusion of
    those file classes (no subrepo patterns are passed).'''
    return dirstate.walk(matcher, [], unknown, ignored)
28
28
def repoadd(repo, list):
    '''Schedule the given files for addition in the working copy.'''
    return repo[None].add(list)
32
32
def reporemove(repo, list, unlink=False):
    '''Forget the given files in the working copy under the wlock;
    with unlink=True also delete them from disk, ignoring files that
    are already gone.'''
    def remove(list, unlink):
        wlock = repo.wlock()
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, inst:
                        # a file that is already missing is fine;
                        # re-raise any other failure
                        if inst.errno != errno.ENOENT:
                            raise
            repo[None].forget(list)
        finally:
            wlock.release()
    return remove(list, unlink=unlink)
48
48
def repoforget(repo, list):
    '''Stop tracking the given files in the working copy.'''
    return repo[None].forget(list)
52
52
def findoutgoing(repo, remote, force):
    '''Return the changelog nodes present locally but missing from
    remote (i.e. the outgoing changesets).'''
    # deferred import: see the module docstring's import restriction
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
        remote, force=force)
    return repo.changelog.findmissing(common)
58
58
59 # -- Private worker functions ------------------------------------------
59 # -- Private worker functions ------------------------------------------
60
60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Determine the minimum largefile size, in MB.

    opt wins when set; otherwise, when assumelfiles is true, fall back
    to the [largefiles] minsize config (default 'default').  Aborts on
    a non-numeric value or when no size can be determined at all.'''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            return float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
74
74
def link(src, dest):
    '''Hardlink src to dest; when hardlinking fails, fall back to an
    atomic copy that also replicates src's permission bits.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        # a copy does not preserve the mode the way a hardlink would
        os.chmod(dest, os.stat(src).st_mode)
85
85
def usercachepath(ui, hash):
    '''Return the path for hash in the user-level largefile cache, or
    None when no cache location can be determined.  An explicit
    [largefiles] usercache setting wins over the per-platform default.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # Windows: prefer the local (non-roaming) app data dir
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            # Mac OS X: per-user Caches directory
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # other POSIX: honor XDG_CACHE_HOME, else ~/.cache
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
111
111
def inusercache(ui, hash):
    '''Whether hash is present in the user-level largefile cache.
    (Returns a falsy path value, not False, when no cache exists.)'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
115
115
def findfile(repo, hash):
    '''Return a local-store path for the largefile with the given
    hash, or None when it is in neither the store nor the user cache.
    A user-cache hit is first promoted (linked) into the local store.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        # promote the user-cache copy into the repo-local store
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
127
127
class largefilesdirstate(dirstate.dirstate):
    '''Dirstate subclass that passes every path through unixpath()
    before delegating to the base class -- presumably normalizing to
    '/'-separated form (unixpath is defined elsewhere in this module).'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
143
143
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # a working-copy file matching the recorded hash is clean
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing largefile in the working copy is fine here
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
171
171
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute the status of tracked largefiles against rev.

    Files reported "unsure" by the dirstate are resolved by comparing
    the working-copy largefile's hash with the standin's recorded
    content; files found clean are marked normal in lfdirstate as a
    side effect.  Returns (modified, added, removed, missing, unknown,
    ignored, clean).'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
184
184
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
196
196
def instore(repo, hash):
    '''Whether the largefile with the given hash exists in the
    repo-local store.'''
    return os.path.exists(storepath(repo, hash))
199
199
def storepath(repo, hash):
    '''Path of the given hash inside the repo-local largefile store.'''
    return repo.join(os.path.join(longname, hash))
202
202
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    # findfile also promotes a user-cache hit into the local store
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
217
217
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the working-copy largefile `file` into the local store,
    unless its standin's hash is already present there.'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
223
223
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that still exist in this revision's manifest
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
232
232
233
233
def copytostoreabsolute(repo, file, hash):
    '''Store the largefile at absolute path `file` under `hash` in the
    repo-local store, then mirror it into the user cache.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        # cheap path: hardlink/copy straight from the user cache
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        # write atomically so a partial store file is never visible
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
245
245
def linktousercache(repo, hash):
    '''Mirror the stored largefile into the user cache, when a cache
    location is available on this platform.'''
    target = usercachepath(repo.ui, hash)
    if target:
        util.makedirs(os.path.dirname(target))
        link(storepath(repo, hash), target)
251
251
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # use None sentinels: mutable ([]/{}) defaults are shared between
    # calls and are a well-known Python pitfall
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
272
272
273 def getmatcher(repo, pats=[], opts={}, showbad=True):
273 def getmatcher(repo, pats=[], opts={}, showbad=True):
274 '''Wrapper around scmutil.match() that adds showbad: if false,
274 '''Wrapper around scmutil.match() that adds showbad: if false,
275 neuter the match object's bad() method so it does not print any
275 neuter the match object's bad() method so it does not print any
276 warnings about missing files or directories.'''
276 warnings about missing files or directories.'''
277 match = scmutil.match(repo[None], pats, opts)
277 match = scmutil.match(repo[None], pats, opts)
278
278
279 if not showbad:
279 if not showbad:
280 match.bad = lambda f, msg: None
280 match.bad = lambda f, msg: None
281 return match
281 return match
282
282
283 def composestandinmatcher(repo, rmatcher):
283 def composestandinmatcher(repo, rmatcher):
284 '''Return a matcher that accepts standins corresponding to the
284 '''Return a matcher that accepts standins corresponding to the
285 files accepted by rmatcher. Pass the list of files in the matcher
285 files accepted by rmatcher. Pass the list of files in the matcher
286 as the paths specified by the user.'''
286 as the paths specified by the user.'''
287 smatcher = getstandinmatcher(repo, rmatcher.files())
287 smatcher = getstandinmatcher(repo, rmatcher.files())
288 isstandin = smatcher.matchfn
288 isstandin = smatcher.matchfn
289 def composed_matchfn(f):
289 def composedmatchfn(f):
290 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
290 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
291 smatcher.matchfn = composed_matchfn
291 smatcher.matchfn = composedmatchfn
292
292
293 return smatcher
293 return smatcher
294
294
295 def standin(filename):
295 def standin(filename):
296 '''Return the repo-relative path to the standin for the specified big
296 '''Return the repo-relative path to the standin for the specified big
297 file.'''
297 file.'''
298 # Notes:
298 # Notes:
299 # 1) Most callers want an absolute path, but _create_standin() needs
299 # 1) Most callers want an absolute path, but _createstandin() needs
300 # it repo-relative so lfadd() can pass it to repo_add(). So leave
300 # it repo-relative so lfadd() can pass it to repoadd(). So leave
301 # it up to the caller to use repo.wjoin() to get an absolute path.
301 # it up to the caller to use repo.wjoin() to get an absolute path.
302 # 2) Join with '/' because that's what dirstate always uses, even on
302 # 2) Join with '/' because that's what dirstate always uses, even on
303 # Windows. Change existing separator to '/' first in case we are
303 # Windows. Change existing separator to '/' first in case we are
304 # passed filenames from an external source (like the command line).
304 # passed filenames from an external source (like the command line).
305 return shortname + '/' + util.pconvert(filename)
305 return shortname + '/' + util.pconvert(filename)
306
306
307 def isstandin(filename):
307 def isstandin(filename):
308 '''Return true if filename is a big file standin. filename must be
308 '''Return true if filename is a big file standin. filename must be
309 in Mercurial's internal form (slash-separated).'''
309 in Mercurial's internal form (slash-separated).'''
310 return filename.startswith(shortname + '/')
310 return filename.startswith(shortname + '/')
311
311
312 def splitstandin(filename):
312 def splitstandin(filename):
313 # Split on / because that's what dirstate always uses, even on Windows.
313 # Split on / because that's what dirstate always uses, even on Windows.
314 # Change local separator to / first just in case we are passed filenames
314 # Change local separator to / first just in case we are passed filenames
315 # from an external source (like the command line).
315 # from an external source (like the command line).
316 bits = util.pconvert(filename).split('/', 1)
316 bits = util.pconvert(filename).split('/', 1)
317 if len(bits) == 2 and bits[0] == shortname:
317 if len(bits) == 2 and bits[0] == shortname:
318 return bits[1]
318 return bits[1]
319 else:
319 else:
320 return None
320 return None
321
321
322 def updatestandin(repo, standin):
322 def updatestandin(repo, standin):
323 file = repo.wjoin(splitstandin(standin))
323 file = repo.wjoin(splitstandin(standin))
324 if os.path.exists(file):
324 if os.path.exists(file):
325 hash = hashfile(file)
325 hash = hashfile(file)
326 executable = getexecutable(file)
326 executable = getexecutable(file)
327 writestandin(repo, standin, hash, executable)
327 writestandin(repo, standin, hash, executable)
328
328
329 def readstandin(repo, filename, node=None):
329 def readstandin(repo, filename, node=None):
330 '''read hex hash from standin for filename at given node, or working
330 '''read hex hash from standin for filename at given node, or working
331 directory if no node is given'''
331 directory if no node is given'''
332 return repo[node][standin(filename)].data().strip()
332 return repo[node][standin(filename)].data().strip()
333
333
334 def writestandin(repo, standin, hash, executable):
334 def writestandin(repo, standin, hash, executable):
335 '''write hash to <repo.root>/<standin>'''
335 '''write hash to <repo.root>/<standin>'''
336 writehash(hash, repo.wjoin(standin), executable)
336 writehash(hash, repo.wjoin(standin), executable)
337
337
338 def copyandhash(instream, outfile):
338 def copyandhash(instream, outfile):
339 '''Read bytes from instream (iterable) and write them to outfile,
339 '''Read bytes from instream (iterable) and write them to outfile,
340 computing the SHA-1 hash of the data along the way. Close outfile
340 computing the SHA-1 hash of the data along the way. Close outfile
341 when done and return the binary hash.'''
341 when done and return the binary hash.'''
342 hasher = util.sha1('')
342 hasher = util.sha1('')
343 for data in instream:
343 for data in instream:
344 hasher.update(data)
344 hasher.update(data)
345 outfile.write(data)
345 outfile.write(data)
346
346
347 # Blecch: closing a file that somebody else opened is rude and
347 # Blecch: closing a file that somebody else opened is rude and
348 # wrong. But it's so darn convenient and practical! After all,
348 # wrong. But it's so darn convenient and practical! After all,
349 # outfile was opened just to copy and hash.
349 # outfile was opened just to copy and hash.
350 outfile.close()
350 outfile.close()
351
351
352 return hasher.digest()
352 return hasher.digest()
353
353
354 def hashrepofile(repo, file):
354 def hashrepofile(repo, file):
355 return hashfile(repo.wjoin(file))
355 return hashfile(repo.wjoin(file))
356
356
357 def hashfile(file):
357 def hashfile(file):
358 if not os.path.exists(file):
358 if not os.path.exists(file):
359 return ''
359 return ''
360 hasher = util.sha1('')
360 hasher = util.sha1('')
361 fd = open(file, 'rb')
361 fd = open(file, 'rb')
362 for data in blockstream(fd):
362 for data in blockstream(fd):
363 hasher.update(data)
363 hasher.update(data)
364 fd.close()
364 fd.close()
365 return hasher.hexdigest()
365 return hasher.hexdigest()
366
366
367 class limitreader(object):
367 class limitreader(object):
368 def __init__(self, f, limit):
368 def __init__(self, f, limit):
369 self.f = f
369 self.f = f
370 self.limit = limit
370 self.limit = limit
371
371
372 def read(self, length):
372 def read(self, length):
373 if self.limit == 0:
373 if self.limit == 0:
374 return ''
374 return ''
375 length = length > self.limit and self.limit or length
375 length = length > self.limit and self.limit or length
376 self.limit -= length
376 self.limit -= length
377 return self.f.read(length)
377 return self.f.read(length)
378
378
379 def close(self):
379 def close(self):
380 pass
380 pass
381
381
382 def blockstream(infile, blocksize=128 * 1024):
382 def blockstream(infile, blocksize=128 * 1024):
383 """Generator that yields blocks of data from infile and closes infile."""
383 """Generator that yields blocks of data from infile and closes infile."""
384 while True:
384 while True:
385 data = infile.read(blocksize)
385 data = infile.read(blocksize)
386 if not data:
386 if not data:
387 break
387 break
388 yield data
388 yield data
389 # same blecch as copyandhash() above
389 # same blecch as copyandhash() above
390 infile.close()
390 infile.close()
391
391
392 def writehash(hash, filename, executable):
392 def writehash(hash, filename, executable):
393 util.makedirs(os.path.dirname(filename))
393 util.makedirs(os.path.dirname(filename))
394 util.writefile(filename, hash + '\n')
394 util.writefile(filename, hash + '\n')
395 os.chmod(filename, getmode(executable))
395 os.chmod(filename, getmode(executable))
396
396
397 def getexecutable(filename):
397 def getexecutable(filename):
398 mode = os.stat(filename).st_mode
398 mode = os.stat(filename).st_mode
399 return ((mode & stat.S_IXUSR) and
399 return ((mode & stat.S_IXUSR) and
400 (mode & stat.S_IXGRP) and
400 (mode & stat.S_IXGRP) and
401 (mode & stat.S_IXOTH))
401 (mode & stat.S_IXOTH))
402
402
403 def getmode(executable):
403 def getmode(executable):
404 if executable:
404 if executable:
405 return 0755
405 return 0755
406 else:
406 else:
407 return 0644
407 return 0644
408
408
409 def urljoin(first, second, *arg):
409 def urljoin(first, second, *arg):
410 def join(left, right):
410 def join(left, right):
411 if not left.endswith('/'):
411 if not left.endswith('/'):
412 left += '/'
412 left += '/'
413 if right.startswith('/'):
413 if right.startswith('/'):
414 right = right[1:]
414 right = right[1:]
415 return left + right
415 return left + right
416
416
417 url = join(first, second)
417 url = join(first, second)
418 for a in arg:
418 for a in arg:
419 url = join(url, a)
419 url = join(url, a)
420 return url
420 return url
421
421
422 def hexsha1(data):
422 def hexsha1(data):
423 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
423 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
424 object data"""
424 object data"""
425 h = util.sha1()
425 h = util.sha1()
426 for chunk in util.filechunkiter(data):
426 for chunk in util.filechunkiter(data):
427 h.update(chunk)
427 h.update(chunk)
428 return h.hexdigest()
428 return h.hexdigest()
429
429
430 def httpsendfile(ui, filename):
430 def httpsendfile(ui, filename):
431 return httpconnection.httpsendfile(ui, filename, 'rb')
431 return httpconnection.httpsendfile(ui, filename, 'rb')
432
432
433 def unixpath(path):
433 def unixpath(path):
434 '''Return a version of path normalized for use with the lfdirstate.'''
434 '''Return a version of path normalized for use with the lfdirstate.'''
435 return util.pconvert(os.path.normpath(path))
435 return util.pconvert(os.path.normpath(path))
436
436
437 def islfilesrepo(repo):
437 def islfilesrepo(repo):
438 return ('largefiles' in repo.requirements and
438 return ('largefiles' in repo.requirements and
439 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
439 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
440
440
441 class storeprotonotcapable(Exception):
441 class storeprotonotcapable(Exception):
442 def __init__(self, storetypes):
442 def __init__(self, storetypes):
443 self.storetypes = storetypes
443 self.storetypes = storetypes
444
444
445 def getcurrentheads(repo):
445 def getcurrentheads(repo):
446 branches = repo.branchmap()
446 branches = repo.branchmap()
447 heads = []
447 heads = []
448 for branch in branches:
448 for branch in branches:
449 newheads = repo.branchheads(branch)
449 newheads = repo.branchheads(branch)
450 heads = heads + newheads
450 heads = heads + newheads
451 return heads
451 return heads
452
452
453 def getstandinsstate(repo):
453 def getstandinsstate(repo):
454 standins = []
454 standins = []
455 matcher = getstandinmatcher(repo)
455 matcher = getstandinmatcher(repo)
456 for standin in dirstate_walk(repo.dirstate, matcher):
456 for standin in dirstatewalk(repo.dirstate, matcher):
457 lfile = splitstandin(standin)
457 lfile = splitstandin(standin)
458 standins.append((lfile, readstandin(repo, lfile)))
458 standins.append((lfile, readstandin(repo, lfile)))
459 return standins
459 return standins
460
460
461 def getlfilestoupdate(oldstandins, newstandins):
461 def getlfilestoupdate(oldstandins, newstandins):
462 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
462 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
463 filelist = []
463 filelist = []
464 for f in changedstandins:
464 for f in changedstandins:
465 if f[0] not in filelist:
465 if f[0] not in filelist:
466 filelist.append(f[0])
466 filelist.append(f[0])
467 return filelist
467 return filelist
@@ -1,968 +1,968 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge
15 node, archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def installnormalfilesmatchfn(manifest):
25 def installnormalfilesmatchfn(manifest):
26 '''overrides scmutil.match so that the matcher it returns will ignore all
26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 largefiles'''
27 largefiles'''
28 oldmatch = None # for the closure
28 oldmatch = None # for the closure
29 def override_match(ctx, pats=[], opts={}, globbed=False,
29 def overridematch(ctx, pats=[], opts={}, globbed=False,
30 default='relpath'):
30 default='relpath'):
31 match = oldmatch(ctx, pats, opts, globbed, default)
31 match = oldmatch(ctx, pats, opts, globbed, default)
32 m = copy.copy(match)
32 m = copy.copy(match)
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 manifest)
34 manifest)
35 m._files = filter(notlfile, m._files)
35 m._files = filter(notlfile, m._files)
36 m._fmap = set(m._files)
36 m._fmap = set(m._files)
37 orig_matchfn = m.matchfn
37 origmatchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(override_match)
40 oldmatch = installmatchfn(overridematch)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 oldmatch = scmutil.match
43 oldmatch = scmutil.match
44 setattr(f, 'oldmatch', oldmatch)
44 setattr(f, 'oldmatch', oldmatch)
45 scmutil.match = f
45 scmutil.match = f
46 return oldmatch
46 return oldmatch
47
47
48 def restorematchfn():
48 def restorematchfn():
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 was called. no-op if scmutil.match is its original function.
50 was called. no-op if scmutil.match is its original function.
51
51
52 Note that n calls to installnormalfilesmatchfn will require n calls to
52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 restore matchfn to reverse'''
53 restore matchfn to reverse'''
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55
55
56 def add_largefiles(ui, repo, *pats, **opts):
56 def addlargefiles(ui, repo, *pats, **opts):
57 large = opts.pop('large', None)
57 large = opts.pop('large', None)
58 lfsize = lfutil.getminsize(
58 lfsize = lfutil.getminsize(
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60
60
61 lfmatcher = None
61 lfmatcher = None
62 if lfutil.islfilesrepo(repo):
62 if lfutil.islfilesrepo(repo):
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 if lfpats:
64 if lfpats:
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66
66
67 lfnames = []
67 lfnames = []
68 m = scmutil.match(repo[None], pats, opts)
68 m = scmutil.match(repo[None], pats, opts)
69 m.bad = lambda x, y: None
69 m.bad = lambda x, y: None
70 wctx = repo[None]
70 wctx = repo[None]
71 for f in repo.walk(m):
71 for f in repo.walk(m):
72 exact = m.exact(f)
72 exact = m.exact(f)
73 lfile = lfutil.standin(f) in wctx
73 lfile = lfutil.standin(f) in wctx
74 nfile = f in wctx
74 nfile = f in wctx
75 exists = lfile or nfile
75 exists = lfile or nfile
76
76
77 # Don't warn the user when they attempt to add a normal tracked file.
77 # Don't warn the user when they attempt to add a normal tracked file.
78 # The normal add code will do that for us.
78 # The normal add code will do that for us.
79 if exact and exists:
79 if exact and exists:
80 if lfile:
80 if lfile:
81 ui.warn(_('%s already a largefile\n') % f)
81 ui.warn(_('%s already a largefile\n') % f)
82 continue
82 continue
83
83
84 if exact or not exists:
84 if exact or not exists:
85 abovemin = (lfsize and
85 abovemin = (lfsize and
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 lfnames.append(f)
88 lfnames.append(f)
89 if ui.verbose or not exact:
89 if ui.verbose or not exact:
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91
91
92 bad = []
92 bad = []
93 standins = []
93 standins = []
94
94
95 # Need to lock, otherwise there could be a race condition between
95 # Need to lock, otherwise there could be a race condition between
96 # when standins are created and added to the repo.
96 # when standins are created and added to the repo.
97 wlock = repo.wlock()
97 wlock = repo.wlock()
98 try:
98 try:
99 if not opts.get('dry_run'):
99 if not opts.get('dry_run'):
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 for f in lfnames:
101 for f in lfnames:
102 standinname = lfutil.standin(f)
102 standinname = lfutil.standin(f)
103 lfutil.writestandin(repo, standinname, hash='',
103 lfutil.writestandin(repo, standinname, hash='',
104 executable=lfutil.getexecutable(repo.wjoin(f)))
104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 standins.append(standinname)
105 standins.append(standinname)
106 if lfdirstate[f] == 'r':
106 if lfdirstate[f] == 'r':
107 lfdirstate.normallookup(f)
107 lfdirstate.normallookup(f)
108 else:
108 else:
109 lfdirstate.add(f)
109 lfdirstate.add(f)
110 lfdirstate.write()
110 lfdirstate.write()
111 bad += [lfutil.splitstandin(f)
111 bad += [lfutil.splitstandin(f)
112 for f in lfutil.repo_add(repo, standins)
112 for f in lfutil.repoadd(repo, standins)
113 if f in m.files()]
113 if f in m.files()]
114 finally:
114 finally:
115 wlock.release()
115 wlock.release()
116 return bad
116 return bad
117
117
118 def remove_largefiles(ui, repo, *pats, **opts):
118 def removelargefiles(ui, repo, *pats, **opts):
119 after = opts.get('after')
119 after = opts.get('after')
120 if not pats and not after:
120 if not pats and not after:
121 raise util.Abort(_('no files specified'))
121 raise util.Abort(_('no files specified'))
122 m = scmutil.match(repo[None], pats, opts)
122 m = scmutil.match(repo[None], pats, opts)
123 try:
123 try:
124 repo.lfstatus = True
124 repo.lfstatus = True
125 s = repo.status(match=m, clean=True)
125 s = repo.status(match=m, clean=True)
126 finally:
126 finally:
127 repo.lfstatus = False
127 repo.lfstatus = False
128 manifest = repo[None].manifest()
128 manifest = repo[None].manifest()
129 modified, added, deleted, clean = [[f for f in list
129 modified, added, deleted, clean = [[f for f in list
130 if lfutil.standin(f) in manifest]
130 if lfutil.standin(f) in manifest]
131 for list in [s[0], s[1], s[3], s[6]]]
131 for list in [s[0], s[1], s[3], s[6]]]
132
132
133 def warn(files, reason):
133 def warn(files, reason):
134 for f in files:
134 for f in files:
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 % (m.rel(f), reason))
136 % (m.rel(f), reason))
137
137
138 if after:
138 if after:
139 remove, forget = deleted, []
139 remove, forget = deleted, []
140 warn(modified + added + clean, _('file still exists'))
140 warn(modified + added + clean, _('file still exists'))
141 else:
141 else:
142 remove, forget = deleted + clean, []
142 remove, forget = deleted + clean, []
143 warn(modified, _('file is modified'))
143 warn(modified, _('file is modified'))
144 warn(added, _('file has been marked for add'))
144 warn(added, _('file has been marked for add'))
145
145
146 for f in sorted(remove + forget):
146 for f in sorted(remove + forget):
147 if ui.verbose or not m.exact(f):
147 if ui.verbose or not m.exact(f):
148 ui.status(_('removing %s\n') % m.rel(f))
148 ui.status(_('removing %s\n') % m.rel(f))
149
149
150 # Need to lock because standin files are deleted then removed from the
150 # Need to lock because standin files are deleted then removed from the
151 # repository and we could race inbetween.
151 # repository and we could race inbetween.
152 wlock = repo.wlock()
152 wlock = repo.wlock()
153 try:
153 try:
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 for f in remove:
155 for f in remove:
156 if not after:
156 if not after:
157 # If this is being called by addremove, notify the user that we
157 # If this is being called by addremove, notify the user that we
158 # are removing the file.
158 # are removing the file.
159 if getattr(repo, "_isaddremove", False):
159 if getattr(repo, "_isaddremove", False):
160 ui.status(_('removing %s\n') % f)
160 ui.status(_('removing %s\n') % f)
161 if os.path.exists(repo.wjoin(f)):
161 if os.path.exists(repo.wjoin(f)):
162 util.unlinkpath(repo.wjoin(f))
162 util.unlinkpath(repo.wjoin(f))
163 lfdirstate.remove(f)
163 lfdirstate.remove(f)
164 lfdirstate.write()
164 lfdirstate.write()
165 forget = [lfutil.standin(f) for f in forget]
165 forget = [lfutil.standin(f) for f in forget]
166 remove = [lfutil.standin(f) for f in remove]
166 remove = [lfutil.standin(f) for f in remove]
167 lfutil.repo_forget(repo, forget)
167 lfutil.repoforget(repo, forget)
168 # If this is being called by addremove, let the original addremove
168 # If this is being called by addremove, let the original addremove
169 # function handle this.
169 # function handle this.
170 if not getattr(repo, "_isaddremove", False):
170 if not getattr(repo, "_isaddremove", False):
171 lfutil.repo_remove(repo, remove, unlink=True)
171 lfutil.reporemove(repo, remove, unlink=True)
172 finally:
172 finally:
173 wlock.release()
173 wlock.release()
174
174
175 # -- Wrappers: modify existing commands --------------------------------
175 # -- Wrappers: modify existing commands --------------------------------
176
176
177 # Add works by going through the files that the user wanted to add and
177 # Add works by going through the files that the user wanted to add and
178 # checking if they should be added as largefiles. Then it makes a new
178 # checking if they should be added as largefiles. Then it makes a new
179 # matcher which matches only the normal files and runs the original
179 # matcher which matches only the normal files and runs the original
180 # version of add.
180 # version of add.
181 def override_add(orig, ui, repo, *pats, **opts):
181 def overrideadd(orig, ui, repo, *pats, **opts):
182 normal = opts.pop('normal')
182 normal = opts.pop('normal')
183 if normal:
183 if normal:
184 if opts.get('large'):
184 if opts.get('large'):
185 raise util.Abort(_('--normal cannot be used with --large'))
185 raise util.Abort(_('--normal cannot be used with --large'))
186 return orig(ui, repo, *pats, **opts)
186 return orig(ui, repo, *pats, **opts)
187 bad = add_largefiles(ui, repo, *pats, **opts)
187 bad = addlargefiles(ui, repo, *pats, **opts)
188 installnormalfilesmatchfn(repo[None].manifest())
188 installnormalfilesmatchfn(repo[None].manifest())
189 result = orig(ui, repo, *pats, **opts)
189 result = orig(ui, repo, *pats, **opts)
190 restorematchfn()
190 restorematchfn()
191
191
192 return (result == 1 or bad) and 1 or 0
192 return (result == 1 or bad) and 1 or 0
193
193
194 def override_remove(orig, ui, repo, *pats, **opts):
194 def overrideremove(orig, ui, repo, *pats, **opts):
195 installnormalfilesmatchfn(repo[None].manifest())
195 installnormalfilesmatchfn(repo[None].manifest())
196 orig(ui, repo, *pats, **opts)
196 orig(ui, repo, *pats, **opts)
197 restorematchfn()
197 restorematchfn()
198 remove_largefiles(ui, repo, *pats, **opts)
198 removelargefiles(ui, repo, *pats, **opts)
199
199
200 def override_status(orig, ui, repo, *pats, **opts):
200 def overridestatus(orig, ui, repo, *pats, **opts):
201 try:
201 try:
202 repo.lfstatus = True
202 repo.lfstatus = True
203 return orig(ui, repo, *pats, **opts)
203 return orig(ui, repo, *pats, **opts)
204 finally:
204 finally:
205 repo.lfstatus = False
205 repo.lfstatus = False
206
206
207 def override_log(orig, ui, repo, *pats, **opts):
207 def overridelog(orig, ui, repo, *pats, **opts):
208 try:
208 try:
209 repo.lfstatus = True
209 repo.lfstatus = True
210 orig(ui, repo, *pats, **opts)
210 orig(ui, repo, *pats, **opts)
211 finally:
211 finally:
212 repo.lfstatus = False
212 repo.lfstatus = False
213
213
214 def override_verify(orig, ui, repo, *pats, **opts):
214 def overrideverify(orig, ui, repo, *pats, **opts):
215 large = opts.pop('large', False)
215 large = opts.pop('large', False)
216 all = opts.pop('lfa', False)
216 all = opts.pop('lfa', False)
217 contents = opts.pop('lfc', False)
217 contents = opts.pop('lfc', False)
218
218
219 result = orig(ui, repo, *pats, **opts)
219 result = orig(ui, repo, *pats, **opts)
220 if large:
220 if large:
221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 return result
222 return result
223
223
224 # Override needs to refresh standins so that update's normal merge
224 # Override needs to refresh standins so that update's normal merge
225 # will go through properly. Then the other update hook (overriding repo.update)
225 # will go through properly. Then the other update hook (overriding repo.update)
226 # will get the new files. Filemerge is also overriden so that the merge
226 # will get the new files. Filemerge is also overriden so that the merge
227 # will merge standins correctly.
227 # will merge standins correctly.
228 def override_update(orig, ui, repo, *pats, **opts):
228 def overrideupdate(orig, ui, repo, *pats, **opts):
229 lfdirstate = lfutil.openlfdirstate(ui, repo)
229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 False, False)
231 False, False)
232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233
233
234 # Need to lock between the standins getting updated and their
234 # Need to lock between the standins getting updated and their
235 # largefiles getting updated
235 # largefiles getting updated
236 wlock = repo.wlock()
236 wlock = repo.wlock()
237 try:
237 try:
238 if opts['check']:
238 if opts['check']:
239 mod = len(modified) > 0
239 mod = len(modified) > 0
240 for lfile in unsure:
240 for lfile in unsure:
241 standin = lfutil.standin(lfile)
241 standin = lfutil.standin(lfile)
242 if repo['.'][standin].data().strip() != \
242 if repo['.'][standin].data().strip() != \
243 lfutil.hashfile(repo.wjoin(lfile)):
243 lfutil.hashfile(repo.wjoin(lfile)):
244 mod = True
244 mod = True
245 else:
245 else:
246 lfdirstate.normal(lfile)
246 lfdirstate.normal(lfile)
247 lfdirstate.write()
247 lfdirstate.write()
248 if mod:
248 if mod:
249 raise util.Abort(_('uncommitted local changes'))
249 raise util.Abort(_('uncommitted local changes'))
250 # XXX handle removed differently
250 # XXX handle removed differently
251 if not opts['clean']:
251 if not opts['clean']:
252 for lfile in unsure + modified + added:
252 for lfile in unsure + modified + added:
253 lfutil.updatestandin(repo, lfutil.standin(lfile))
253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 finally:
254 finally:
255 wlock.release()
255 wlock.release()
256 return orig(ui, repo, *pats, **opts)
256 return orig(ui, repo, *pats, **opts)
257
257
258 # Before starting the manifest merge, merge.updates will call
258 # Before starting the manifest merge, merge.updates will call
259 # _checkunknown to check if there are any files in the merged-in
259 # _checkunknown to check if there are any files in the merged-in
260 # changeset that collide with unknown files in the working copy.
260 # changeset that collide with unknown files in the working copy.
261 #
261 #
262 # The largefiles are seen as unknown, so this prevents us from merging
262 # The largefiles are seen as unknown, so this prevents us from merging
263 # in a file 'foo' if we already have a largefile with the same name.
263 # in a file 'foo' if we already have a largefile with the same name.
264 #
264 #
265 # The overridden function filters the unknown files by removing any
265 # The overridden function filters the unknown files by removing any
266 # largefiles. This makes the merge proceed and we can then handle this
266 # largefiles. This makes the merge proceed and we can then handle this
267 # case further in the overridden manifestmerge function below.
267 # case further in the overridden manifestmerge function below.
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
268 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
269 if lfutil.standin(f) in wctx:
269 if lfutil.standin(f) in wctx:
270 return False
270 return False
271 return origfn(repo, wctx, mctx, f)
271 return origfn(repo, wctx, mctx, f)
272
272
273 # The manifest merge handles conflicts on the manifest level. We want
273 # The manifest merge handles conflicts on the manifest level. We want
274 # to handle changes in largefile-ness of files at this level too.
274 # to handle changes in largefile-ness of files at this level too.
275 #
275 #
276 # The strategy is to run the original manifestmerge and then process
276 # The strategy is to run the original manifestmerge and then process
277 # the action list it outputs. There are two cases we need to deal with:
277 # the action list it outputs. There are two cases we need to deal with:
278 #
278 #
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 # detected via its standin file, which will enter the working copy
280 # detected via its standin file, which will enter the working copy
281 # with a "get" action. It is not "merge" since the standin is all
281 # with a "get" action. It is not "merge" since the standin is all
282 # Mercurial is concerned with at this level -- the link to the
282 # Mercurial is concerned with at this level -- the link to the
283 # existing normal file is not relevant here.
283 # existing normal file is not relevant here.
284 #
284 #
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 # since the largefile will be present in the working copy and
286 # since the largefile will be present in the working copy and
287 # different from the normal file in p2. Mercurial therefore
287 # different from the normal file in p2. Mercurial therefore
288 # triggers a merge action.
288 # triggers a merge action.
289 #
289 #
290 # In both cases, we prompt the user and emit new actions to either
290 # In both cases, we prompt the user and emit new actions to either
291 # remove the standin (if the normal file was kept) or to remove the
291 # remove the standin (if the normal file was kept) or to remove the
292 # normal file and get the standin (if the largefile was kept). The
292 # normal file and get the standin (if the largefile was kept). The
293 # default prompt answer is to use the largefile version since it was
293 # default prompt answer is to use the largefile version since it was
294 # presumably changed on purpose.
294 # presumably changed on purpose.
295 #
295 #
296 # Finally, the merge.applyupdates function will then take care of
296 # Finally, the merge.applyupdates function will then take care of
297 # writing the files into the working copy and lfcommands.updatelfiles
297 # writing the files into the working copy and lfcommands.updatelfiles
298 # will update the largefiles.
298 # will update the largefiles.
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
299 def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
301 processed = []
301 processed = []
302
302
303 for action in actions:
303 for action in actions:
304 if overwrite:
304 if overwrite:
305 processed.append(action)
305 processed.append(action)
306 continue
306 continue
307 f, m = action[:2]
307 f, m = action[:2]
308
308
309 choices = (_('&Largefile'), _('&Normal file'))
309 choices = (_('&Largefile'), _('&Normal file'))
310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311 # Case 1: normal file in the working copy, largefile in
311 # Case 1: normal file in the working copy, largefile in
312 # the second parent
312 # the second parent
313 lfile = lfutil.splitstandin(f)
313 lfile = lfutil.splitstandin(f)
314 standin = f
314 standin = f
315 msg = _('%s has been turned into a largefile\n'
315 msg = _('%s has been turned into a largefile\n'
316 'use (l)argefile or keep as (n)ormal file?') % lfile
316 'use (l)argefile or keep as (n)ormal file?') % lfile
317 if repo.ui.promptchoice(msg, choices, 0) == 0:
317 if repo.ui.promptchoice(msg, choices, 0) == 0:
318 processed.append((lfile, "r"))
318 processed.append((lfile, "r"))
319 processed.append((standin, "g", p2.flags(standin)))
319 processed.append((standin, "g", p2.flags(standin)))
320 else:
320 else:
321 processed.append((standin, "r"))
321 processed.append((standin, "r"))
322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323 # Case 2: largefile in the working copy, normal file in
323 # Case 2: largefile in the working copy, normal file in
324 # the second parent
324 # the second parent
325 standin = lfutil.standin(f)
325 standin = lfutil.standin(f)
326 lfile = f
326 lfile = f
327 msg = _('%s has been turned into a normal file\n'
327 msg = _('%s has been turned into a normal file\n'
328 'keep as (l)argefile or use (n)ormal file?') % lfile
328 'keep as (l)argefile or use (n)ormal file?') % lfile
329 if repo.ui.promptchoice(msg, choices, 0) == 0:
329 if repo.ui.promptchoice(msg, choices, 0) == 0:
330 processed.append((lfile, "r"))
330 processed.append((lfile, "r"))
331 else:
331 else:
332 processed.append((standin, "r"))
332 processed.append((standin, "r"))
333 processed.append((lfile, "g", p2.flags(lfile)))
333 processed.append((lfile, "g", p2.flags(lfile)))
334 else:
334 else:
335 processed.append(action)
335 processed.append(action)
336
336
337 return processed
337 return processed
338
338
339 # Override filemerge to prompt the user about how they wish to merge
339 # Override filemerge to prompt the user about how they wish to merge
340 # largefiles. This will handle identical edits, and copy/rename +
340 # largefiles. This will handle identical edits, and copy/rename +
341 # edit without prompting the user.
341 # edit without prompting the user.
342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
342 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
343 # Use better variable names here. Because this is a wrapper we cannot
343 # Use better variable names here. Because this is a wrapper we cannot
344 # change the variable names in the function declaration.
344 # change the variable names in the function declaration.
345 fcdest, fcother, fcancestor = fcd, fco, fca
345 fcdest, fcother, fcancestor = fcd, fco, fca
346 if not lfutil.isstandin(orig):
346 if not lfutil.isstandin(orig):
347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
348 else:
348 else:
349 if not fcother.cmp(fcdest): # files identical?
349 if not fcother.cmp(fcdest): # files identical?
350 return None
350 return None
351
351
352 # backwards, use working dir parent as ancestor
352 # backwards, use working dir parent as ancestor
353 if fcancestor == fcother:
353 if fcancestor == fcother:
354 fcancestor = fcdest.parents()[0]
354 fcancestor = fcdest.parents()[0]
355
355
356 if orig != fcother.path():
356 if orig != fcother.path():
357 repo.ui.status(_('merging %s and %s to %s\n')
357 repo.ui.status(_('merging %s and %s to %s\n')
358 % (lfutil.splitstandin(orig),
358 % (lfutil.splitstandin(orig),
359 lfutil.splitstandin(fcother.path()),
359 lfutil.splitstandin(fcother.path()),
360 lfutil.splitstandin(fcdest.path())))
360 lfutil.splitstandin(fcdest.path())))
361 else:
361 else:
362 repo.ui.status(_('merging %s\n')
362 repo.ui.status(_('merging %s\n')
363 % lfutil.splitstandin(fcdest.path()))
363 % lfutil.splitstandin(fcdest.path()))
364
364
365 if fcancestor.path() != fcother.path() and fcother.data() == \
365 if fcancestor.path() != fcother.path() and fcother.data() == \
366 fcancestor.data():
366 fcancestor.data():
367 return 0
367 return 0
368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
369 fcancestor.data():
369 fcancestor.data():
370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
371 return 0
371 return 0
372
372
373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
374 'keep (l)ocal or take (o)ther?') %
374 'keep (l)ocal or take (o)ther?') %
375 lfutil.splitstandin(orig),
375 lfutil.splitstandin(orig),
376 (_('&Local'), _('&Other')), 0) == 0:
376 (_('&Local'), _('&Other')), 0) == 0:
377 return 0
377 return 0
378 else:
378 else:
379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
380 return 0
380 return 0
381
381
382 # Copy first changes the matchers to match standins instead of
382 # Copy first changes the matchers to match standins instead of
383 # largefiles. Then it overrides util.copyfile in that function it
383 # largefiles. Then it overrides util.copyfile in that function it
384 # checks if the destination largefile already exists. It also keeps a
384 # checks if the destination largefile already exists. It also keeps a
385 # list of copied files so that the largefiles can be copied and the
385 # list of copied files so that the largefiles can be copied and the
386 # dirstate updated.
386 # dirstate updated.
387 def override_copy(orig, ui, repo, pats, opts, rename=False):
387 def overridecopy(orig, ui, repo, pats, opts, rename=False):
388 # doesn't remove largefile on rename
388 # doesn't remove largefile on rename
389 if len(pats) < 2:
389 if len(pats) < 2:
390 # this isn't legal, let the original function deal with it
390 # this isn't legal, let the original function deal with it
391 return orig(ui, repo, pats, opts, rename)
391 return orig(ui, repo, pats, opts, rename)
392
392
393 def makestandin(relpath):
393 def makestandin(relpath):
394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
395 return os.path.join(repo.wjoin(lfutil.standin(path)))
395 return os.path.join(repo.wjoin(lfutil.standin(path)))
396
396
397 fullpats = scmutil.expandpats(pats)
397 fullpats = scmutil.expandpats(pats)
398 dest = fullpats[-1]
398 dest = fullpats[-1]
399
399
400 if os.path.isdir(dest):
400 if os.path.isdir(dest):
401 if not os.path.isdir(makestandin(dest)):
401 if not os.path.isdir(makestandin(dest)):
402 os.makedirs(makestandin(dest))
402 os.makedirs(makestandin(dest))
403 # This could copy both lfiles and normal files in one command,
403 # This could copy both lfiles and normal files in one command,
404 # but we don't want to do that. First replace their matcher to
404 # but we don't want to do that. First replace their matcher to
405 # only match normal files and run it, then replace it to just
405 # only match normal files and run it, then replace it to just
406 # match largefiles and run it again.
406 # match largefiles and run it again.
407 nonormalfiles = False
407 nonormalfiles = False
408 nolfiles = False
408 nolfiles = False
409 try:
409 try:
410 try:
410 try:
411 installnormalfilesmatchfn(repo[None].manifest())
411 installnormalfilesmatchfn(repo[None].manifest())
412 result = orig(ui, repo, pats, opts, rename)
412 result = orig(ui, repo, pats, opts, rename)
413 except util.Abort, e:
413 except util.Abort, e:
414 if str(e) != 'no files to copy':
414 if str(e) != 'no files to copy':
415 raise e
415 raise e
416 else:
416 else:
417 nonormalfiles = True
417 nonormalfiles = True
418 result = 0
418 result = 0
419 finally:
419 finally:
420 restorematchfn()
420 restorematchfn()
421
421
422 # The first rename can cause our current working directory to be removed.
422 # The first rename can cause our current working directory to be removed.
423 # In that case there is nothing left to copy/rename so just quit.
423 # In that case there is nothing left to copy/rename so just quit.
424 try:
424 try:
425 repo.getcwd()
425 repo.getcwd()
426 except OSError:
426 except OSError:
427 return result
427 return result
428
428
429 try:
429 try:
430 try:
430 try:
431 # When we call orig below it creates the standins but we don't add them
431 # When we call orig below it creates the standins but we don't add them
432 # to the dir state until later so lock during that time.
432 # to the dir state until later so lock during that time.
433 wlock = repo.wlock()
433 wlock = repo.wlock()
434
434
435 manifest = repo[None].manifest()
435 manifest = repo[None].manifest()
436 oldmatch = None # for the closure
436 oldmatch = None # for the closure
437 def override_match(ctx, pats=[], opts={}, globbed=False,
437 def overridematch(ctx, pats=[], opts={}, globbed=False,
438 default='relpath'):
438 default='relpath'):
439 newpats = []
439 newpats = []
440 # The patterns were previously mangled to add the standin
440 # The patterns were previously mangled to add the standin
441 # directory; we need to remove that now
441 # directory; we need to remove that now
442 for pat in pats:
442 for pat in pats:
443 if match_.patkind(pat) is None and lfutil.shortname in pat:
443 if match_.patkind(pat) is None and lfutil.shortname in pat:
444 newpats.append(pat.replace(lfutil.shortname, ''))
444 newpats.append(pat.replace(lfutil.shortname, ''))
445 else:
445 else:
446 newpats.append(pat)
446 newpats.append(pat)
447 match = oldmatch(ctx, newpats, opts, globbed, default)
447 match = oldmatch(ctx, newpats, opts, globbed, default)
448 m = copy.copy(match)
448 m = copy.copy(match)
449 lfile = lambda f: lfutil.standin(f) in manifest
449 lfile = lambda f: lfutil.standin(f) in manifest
450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
451 m._fmap = set(m._files)
451 m._fmap = set(m._files)
452 orig_matchfn = m.matchfn
452 origmatchfn = m.matchfn
453 m.matchfn = lambda f: (lfutil.isstandin(f) and
453 m.matchfn = lambda f: (lfutil.isstandin(f) and
454 (f in manifest) and
454 (f in manifest) and
455 orig_matchfn(lfutil.splitstandin(f)) or
455 origmatchfn(lfutil.splitstandin(f)) or
456 None)
456 None)
457 return m
457 return m
458 oldmatch = installmatchfn(override_match)
458 oldmatch = installmatchfn(overridematch)
459 listpats = []
459 listpats = []
460 for pat in pats:
460 for pat in pats:
461 if match_.patkind(pat) is not None:
461 if match_.patkind(pat) is not None:
462 listpats.append(pat)
462 listpats.append(pat)
463 else:
463 else:
464 listpats.append(makestandin(pat))
464 listpats.append(makestandin(pat))
465
465
466 try:
466 try:
467 origcopyfile = util.copyfile
467 origcopyfile = util.copyfile
468 copiedfiles = []
468 copiedfiles = []
469 def override_copyfile(src, dest):
469 def overridecopyfile(src, dest):
470 if (lfutil.shortname in src and
470 if (lfutil.shortname in src and
471 dest.startswith(repo.wjoin(lfutil.shortname))):
471 dest.startswith(repo.wjoin(lfutil.shortname))):
472 destlfile = dest.replace(lfutil.shortname, '')
472 destlfile = dest.replace(lfutil.shortname, '')
473 if not opts['force'] and os.path.exists(destlfile):
473 if not opts['force'] and os.path.exists(destlfile):
474 raise IOError('',
474 raise IOError('',
475 _('destination largefile already exists'))
475 _('destination largefile already exists'))
476 copiedfiles.append((src, dest))
476 copiedfiles.append((src, dest))
477 origcopyfile(src, dest)
477 origcopyfile(src, dest)
478
478
479 util.copyfile = override_copyfile
479 util.copyfile = overridecopyfile
480 result += orig(ui, repo, listpats, opts, rename)
480 result += orig(ui, repo, listpats, opts, rename)
481 finally:
481 finally:
482 util.copyfile = origcopyfile
482 util.copyfile = origcopyfile
483
483
484 lfdirstate = lfutil.openlfdirstate(ui, repo)
484 lfdirstate = lfutil.openlfdirstate(ui, repo)
485 for (src, dest) in copiedfiles:
485 for (src, dest) in copiedfiles:
486 if (lfutil.shortname in src and
486 if (lfutil.shortname in src and
487 dest.startswith(repo.wjoin(lfutil.shortname))):
487 dest.startswith(repo.wjoin(lfutil.shortname))):
488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
490 destlfiledir = os.path.dirname(destlfile) or '.'
490 destlfiledir = os.path.dirname(destlfile) or '.'
491 if not os.path.isdir(destlfiledir):
491 if not os.path.isdir(destlfiledir):
492 os.makedirs(destlfiledir)
492 os.makedirs(destlfiledir)
493 if rename:
493 if rename:
494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
495 lfdirstate.remove(srclfile)
495 lfdirstate.remove(srclfile)
496 else:
496 else:
497 util.copyfile(srclfile, destlfile)
497 util.copyfile(srclfile, destlfile)
498 lfdirstate.add(destlfile)
498 lfdirstate.add(destlfile)
499 lfdirstate.write()
499 lfdirstate.write()
500 except util.Abort, e:
500 except util.Abort, e:
501 if str(e) != 'no files to copy':
501 if str(e) != 'no files to copy':
502 raise e
502 raise e
503 else:
503 else:
504 nolfiles = True
504 nolfiles = True
505 finally:
505 finally:
506 restorematchfn()
506 restorematchfn()
507 wlock.release()
507 wlock.release()
508
508
509 if nolfiles and nonormalfiles:
509 if nolfiles and nonormalfiles:
510 raise util.Abort(_('no files to copy'))
510 raise util.Abort(_('no files to copy'))
511
511
512 return result
512 return result
513
513
514 # When the user calls revert, we have to be careful to not revert any
514 # When the user calls revert, we have to be careful to not revert any
515 # changes to other largefiles accidentally. This means we have to keep
515 # changes to other largefiles accidentally. This means we have to keep
516 # track of the largefiles that are being reverted so we only pull down
516 # track of the largefiles that are being reverted so we only pull down
517 # the necessary largefiles.
517 # the necessary largefiles.
518 #
518 #
519 # Standins are only updated (to match the hash of largefiles) before
519 # Standins are only updated (to match the hash of largefiles) before
520 # commits. Update the standins then run the original revert, changing
520 # commits. Update the standins then run the original revert, changing
521 # the matcher to hit standins instead of largefiles. Based on the
521 # the matcher to hit standins instead of largefiles. Based on the
522 # resulting standins update the largefiles. Then return the standins
522 # resulting standins update the largefiles. Then return the standins
523 # to their proper state
523 # to their proper state
524 def override_revert(orig, ui, repo, *pats, **opts):
524 def overriderevert(orig, ui, repo, *pats, **opts):
525 # Because we put the standins in a bad state (by updating them)
525 # Because we put the standins in a bad state (by updating them)
526 # and then return them to a correct state we need to lock to
526 # and then return them to a correct state we need to lock to
527 # prevent others from changing them in their incorrect state.
527 # prevent others from changing them in their incorrect state.
528 wlock = repo.wlock()
528 wlock = repo.wlock()
529 try:
529 try:
530 lfdirstate = lfutil.openlfdirstate(ui, repo)
530 lfdirstate = lfutil.openlfdirstate(ui, repo)
531 (modified, added, removed, missing, unknown, ignored, clean) = \
531 (modified, added, removed, missing, unknown, ignored, clean) = \
532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
532 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
533 for lfile in modified:
533 for lfile in modified:
534 lfutil.updatestandin(repo, lfutil.standin(lfile))
534 lfutil.updatestandin(repo, lfutil.standin(lfile))
535 for lfile in missing:
535 for lfile in missing:
536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
537
537
538 try:
538 try:
539 ctx = repo[opts.get('rev')]
539 ctx = repo[opts.get('rev')]
540 oldmatch = None # for the closure
540 oldmatch = None # for the closure
541 def override_match(ctx, pats=[], opts={}, globbed=False,
541 def overridematch(ctx, pats=[], opts={}, globbed=False,
542 default='relpath'):
542 default='relpath'):
543 match = oldmatch(ctx, pats, opts, globbed, default)
543 match = oldmatch(ctx, pats, opts, globbed, default)
544 m = copy.copy(match)
544 m = copy.copy(match)
545 def tostandin(f):
545 def tostandin(f):
546 if lfutil.standin(f) in ctx:
546 if lfutil.standin(f) in ctx:
547 return lfutil.standin(f)
547 return lfutil.standin(f)
548 elif lfutil.standin(f) in repo[None]:
548 elif lfutil.standin(f) in repo[None]:
549 return None
549 return None
550 return f
550 return f
551 m._files = [tostandin(f) for f in m._files]
551 m._files = [tostandin(f) for f in m._files]
552 m._files = [f for f in m._files if f is not None]
552 m._files = [f for f in m._files if f is not None]
553 m._fmap = set(m._files)
553 m._fmap = set(m._files)
554 orig_matchfn = m.matchfn
554 origmatchfn = m.matchfn
555 def matchfn(f):
555 def matchfn(f):
556 if lfutil.isstandin(f):
556 if lfutil.isstandin(f):
557 # We need to keep track of what largefiles are being
557 # We need to keep track of what largefiles are being
558 # matched so we know which ones to update later --
558 # matched so we know which ones to update later --
559 # otherwise we accidentally revert changes to other
559 # otherwise we accidentally revert changes to other
560 # largefiles. This is repo-specific, so duckpunch the
560 # largefiles. This is repo-specific, so duckpunch the
561 # repo object to keep the list of largefiles for us
561 # repo object to keep the list of largefiles for us
562 # later.
562 # later.
563 if orig_matchfn(lfutil.splitstandin(f)) and \
563 if origmatchfn(lfutil.splitstandin(f)) and \
564 (f in repo[None] or f in ctx):
564 (f in repo[None] or f in ctx):
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 lfileslist.append(lfutil.splitstandin(f))
566 lfileslist.append(lfutil.splitstandin(f))
567 repo._lfilestoupdate = lfileslist
567 repo._lfilestoupdate = lfileslist
568 return True
568 return True
569 else:
569 else:
570 return False
570 return False
571 return orig_matchfn(f)
571 return origmatchfn(f)
572 m.matchfn = matchfn
572 m.matchfn = matchfn
573 return m
573 return m
574 oldmatch = installmatchfn(override_match)
574 oldmatch = installmatchfn(overridematch)
575 scmutil.match
575 scmutil.match
576 matches = override_match(repo[None], pats, opts)
576 matches = overridematch(repo[None], pats, opts)
577 orig(ui, repo, *pats, **opts)
577 orig(ui, repo, *pats, **opts)
578 finally:
578 finally:
579 restorematchfn()
579 restorematchfn()
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 printmessage=False)
582 printmessage=False)
583
583
584 # empty out the largefiles list so we start fresh next time
584 # empty out the largefiles list so we start fresh next time
585 repo._lfilestoupdate = []
585 repo._lfilestoupdate = []
586 for lfile in modified:
586 for lfile in modified:
587 if lfile in lfileslist:
587 if lfile in lfileslist:
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 in repo['.']:
589 in repo['.']:
590 lfutil.writestandin(repo, lfutil.standin(lfile),
590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 repo['.'][lfile].data().strip(),
591 repo['.'][lfile].data().strip(),
592 'x' in repo['.'][lfile].flags())
592 'x' in repo['.'][lfile].flags())
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 for lfile in added:
594 for lfile in added:
595 standin = lfutil.standin(lfile)
595 standin = lfutil.standin(lfile)
596 if standin not in ctx and (standin in matches or opts.get('all')):
596 if standin not in ctx and (standin in matches or opts.get('all')):
597 if lfile in lfdirstate:
597 if lfile in lfdirstate:
598 lfdirstate.drop(lfile)
598 lfdirstate.drop(lfile)
599 util.unlinkpath(repo.wjoin(standin))
599 util.unlinkpath(repo.wjoin(standin))
600 lfdirstate.write()
600 lfdirstate.write()
601 finally:
601 finally:
602 wlock.release()
602 wlock.release()
603
603
604 def hg_update(orig, repo, node):
604 def hgupdate(orig, repo, node):
605 # Only call updatelfiles the standins that have changed to save time
605 # Only call updatelfiles the standins that have changed to save time
606 oldstandins = lfutil.getstandinsstate(repo)
606 oldstandins = lfutil.getstandinsstate(repo)
607 result = orig(repo, node)
607 result = orig(repo, node)
608 newstandins = lfutil.getstandinsstate(repo)
608 newstandins = lfutil.getstandinsstate(repo)
609 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
609 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
610 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
610 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
611 return result
611 return result
612
612
613 def hg_clean(orig, repo, node, show_stats=True):
613 def hgclean(orig, repo, node, show_stats=True):
614 result = orig(repo, node, show_stats)
614 result = orig(repo, node, show_stats)
615 lfcommands.updatelfiles(repo.ui, repo)
615 lfcommands.updatelfiles(repo.ui, repo)
616 return result
616 return result
617
617
618 def hg_merge(orig, repo, node, force=None, remind=True):
618 def hgmerge(orig, repo, node, force=None, remind=True):
619 # Mark the repo as being in the middle of a merge, so that
619 # Mark the repo as being in the middle of a merge, so that
620 # updatelfiles() will know that it needs to trust the standins in
620 # updatelfiles() will know that it needs to trust the standins in
621 # the working copy, not in the standins in the current node
621 # the working copy, not in the standins in the current node
622 repo._ismerging = True
622 repo._ismerging = True
623 try:
623 try:
624 result = orig(repo, node, force, remind)
624 result = orig(repo, node, force, remind)
625 lfcommands.updatelfiles(repo.ui, repo)
625 lfcommands.updatelfiles(repo.ui, repo)
626 finally:
626 finally:
627 repo._ismerging = False
627 repo._ismerging = False
628 return result
628 return result
629
629
630 # When we rebase a repository with remotely changed largefiles, we need to
630 # When we rebase a repository with remotely changed largefiles, we need to
631 # take some extra care so that the largefiles are correctly updated in the
631 # take some extra care so that the largefiles are correctly updated in the
632 # working copy
632 # working copy
633 def override_pull(orig, ui, repo, source=None, **opts):
633 def overridepull(orig, ui, repo, source=None, **opts):
634 if opts.get('rebase', False):
634 if opts.get('rebase', False):
635 repo._isrebasing = True
635 repo._isrebasing = True
636 try:
636 try:
637 if opts.get('update'):
637 if opts.get('update'):
638 del opts['update']
638 del opts['update']
639 ui.debug('--update and --rebase are not compatible, ignoring '
639 ui.debug('--update and --rebase are not compatible, ignoring '
640 'the update flag\n')
640 'the update flag\n')
641 del opts['rebase']
641 del opts['rebase']
642 cmdutil.bailifchanged(repo)
642 cmdutil.bailifchanged(repo)
643 revsprepull = len(repo)
643 revsprepull = len(repo)
644 origpostincoming = commands.postincoming
644 origpostincoming = commands.postincoming
645 def _dummy(*args, **kwargs):
645 def _dummy(*args, **kwargs):
646 pass
646 pass
647 commands.postincoming = _dummy
647 commands.postincoming = _dummy
648 repo.lfpullsource = source
648 repo.lfpullsource = source
649 if not source:
649 if not source:
650 source = 'default'
650 source = 'default'
651 try:
651 try:
652 result = commands.pull(ui, repo, source, **opts)
652 result = commands.pull(ui, repo, source, **opts)
653 finally:
653 finally:
654 commands.postincoming = origpostincoming
654 commands.postincoming = origpostincoming
655 revspostpull = len(repo)
655 revspostpull = len(repo)
656 if revspostpull > revsprepull:
656 if revspostpull > revsprepull:
657 result = result or rebase.rebase(ui, repo)
657 result = result or rebase.rebase(ui, repo)
658 finally:
658 finally:
659 repo._isrebasing = False
659 repo._isrebasing = False
660 else:
660 else:
661 repo.lfpullsource = source
661 repo.lfpullsource = source
662 if not source:
662 if not source:
663 source = 'default'
663 source = 'default'
664 oldheads = lfutil.getcurrentheads(repo)
664 oldheads = lfutil.getcurrentheads(repo)
665 result = orig(ui, repo, source, **opts)
665 result = orig(ui, repo, source, **opts)
666 # If we do not have the new largefiles for any new heads we pulled, we
666 # If we do not have the new largefiles for any new heads we pulled, we
667 # will run into a problem later if we try to merge or rebase with one of
667 # will run into a problem later if we try to merge or rebase with one of
668 # these heads, so cache the largefiles now direclty into the system
668 # these heads, so cache the largefiles now direclty into the system
669 # cache.
669 # cache.
670 ui.status(_("caching new largefiles\n"))
670 ui.status(_("caching new largefiles\n"))
671 numcached = 0
671 numcached = 0
672 heads = lfutil.getcurrentheads(repo)
672 heads = lfutil.getcurrentheads(repo)
673 newheads = set(heads).difference(set(oldheads))
673 newheads = set(heads).difference(set(oldheads))
674 for head in newheads:
674 for head in newheads:
675 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
675 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
676 numcached += len(cached)
676 numcached += len(cached)
677 ui.status(_("%d largefiles cached\n") % numcached)
677 ui.status(_("%d largefiles cached\n") % numcached)
678 return result
678 return result
679
679
680 def override_rebase(orig, ui, repo, **opts):
680 def overriderebase(orig, ui, repo, **opts):
681 repo._isrebasing = True
681 repo._isrebasing = True
682 try:
682 try:
683 orig(ui, repo, **opts)
683 orig(ui, repo, **opts)
684 finally:
684 finally:
685 repo._isrebasing = False
685 repo._isrebasing = False
686
686
687 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
687 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
688 prefix=None, mtime=None, subrepos=None):
688 prefix=None, mtime=None, subrepos=None):
689 # No need to lock because we are only reading history and
689 # No need to lock because we are only reading history and
690 # largefile caches, neither of which are modified.
690 # largefile caches, neither of which are modified.
691 lfcommands.cachelfiles(repo.ui, repo, node)
691 lfcommands.cachelfiles(repo.ui, repo, node)
692
692
693 if kind not in archival.archivers:
693 if kind not in archival.archivers:
694 raise util.Abort(_("unknown archive type '%s'") % kind)
694 raise util.Abort(_("unknown archive type '%s'") % kind)
695
695
696 ctx = repo[node]
696 ctx = repo[node]
697
697
698 if kind == 'files':
698 if kind == 'files':
699 if prefix:
699 if prefix:
700 raise util.Abort(
700 raise util.Abort(
701 _('cannot give prefix when archiving to files'))
701 _('cannot give prefix when archiving to files'))
702 else:
702 else:
703 prefix = archival.tidyprefix(dest, kind, prefix)
703 prefix = archival.tidyprefix(dest, kind, prefix)
704
704
705 def write(name, mode, islink, getdata):
705 def write(name, mode, islink, getdata):
706 if matchfn and not matchfn(name):
706 if matchfn and not matchfn(name):
707 return
707 return
708 data = getdata()
708 data = getdata()
709 if decode:
709 if decode:
710 data = repo.wwritedata(name, data)
710 data = repo.wwritedata(name, data)
711 archiver.addfile(prefix + name, mode, islink, data)
711 archiver.addfile(prefix + name, mode, islink, data)
712
712
713 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
713 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
714
714
715 if repo.ui.configbool("ui", "archivemeta", True):
715 if repo.ui.configbool("ui", "archivemeta", True):
716 def metadata():
716 def metadata():
717 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
717 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
718 hex(repo.changelog.node(0)), hex(node), ctx.branch())
718 hex(repo.changelog.node(0)), hex(node), ctx.branch())
719
719
720 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
720 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
721 if repo.tagtype(t) == 'global')
721 if repo.tagtype(t) == 'global')
722 if not tags:
722 if not tags:
723 repo.ui.pushbuffer()
723 repo.ui.pushbuffer()
724 opts = {'template': '{latesttag}\n{latesttagdistance}',
724 opts = {'template': '{latesttag}\n{latesttagdistance}',
725 'style': '', 'patch': None, 'git': None}
725 'style': '', 'patch': None, 'git': None}
726 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
726 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
727 ltags, dist = repo.ui.popbuffer().split('\n')
727 ltags, dist = repo.ui.popbuffer().split('\n')
728 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
728 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
729 tags += 'latesttagdistance: %s\n' % dist
729 tags += 'latesttagdistance: %s\n' % dist
730
730
731 return base + tags
731 return base + tags
732
732
733 write('.hg_archival.txt', 0644, False, metadata)
733 write('.hg_archival.txt', 0644, False, metadata)
734
734
735 for f in ctx:
735 for f in ctx:
736 ff = ctx.flags(f)
736 ff = ctx.flags(f)
737 getdata = ctx[f].data
737 getdata = ctx[f].data
738 if lfutil.isstandin(f):
738 if lfutil.isstandin(f):
739 path = lfutil.findfile(repo, getdata().strip())
739 path = lfutil.findfile(repo, getdata().strip())
740 if path is None:
740 if path is None:
741 raise util.Abort(
741 raise util.Abort(
742 _('largefile %s not found in repo store or system cache')
742 _('largefile %s not found in repo store or system cache')
743 % lfutil.splitstandin(f))
743 % lfutil.splitstandin(f))
744 f = lfutil.splitstandin(f)
744 f = lfutil.splitstandin(f)
745
745
746 def getdatafn():
746 def getdatafn():
747 fd = None
747 fd = None
748 try:
748 try:
749 fd = open(path, 'rb')
749 fd = open(path, 'rb')
750 return fd.read()
750 return fd.read()
751 finally:
751 finally:
752 if fd:
752 if fd:
753 fd.close()
753 fd.close()
754
754
755 getdata = getdatafn
755 getdata = getdatafn
756 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
756 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
757
757
758 if subrepos:
758 if subrepos:
759 for subpath in ctx.substate:
759 for subpath in ctx.substate:
760 sub = ctx.sub(subpath)
760 sub = ctx.sub(subpath)
761 sub.archive(repo.ui, archiver, prefix)
761 sub.archive(repo.ui, archiver, prefix)
762
762
763 archiver.done()
763 archiver.done()
764
764
765 # If a largefile is modified, the change is not reflected in its
765 # If a largefile is modified, the change is not reflected in its
766 # standin until a commit. cmdutil.bailifchanged() raises an exception
766 # standin until a commit. cmdutil.bailifchanged() raises an exception
767 # if the repo has uncommitted changes. Wrap it to also check if
767 # if the repo has uncommitted changes. Wrap it to also check if
768 # largefiles were changed. This is used by bisect and backout.
768 # largefiles were changed. This is used by bisect and backout.
769 def override_bailifchanged(orig, repo):
769 def overridebailifchanged(orig, repo):
770 orig(repo)
770 orig(repo)
771 repo.lfstatus = True
771 repo.lfstatus = True
772 modified, added, removed, deleted = repo.status()[:4]
772 modified, added, removed, deleted = repo.status()[:4]
773 repo.lfstatus = False
773 repo.lfstatus = False
774 if modified or added or removed or deleted:
774 if modified or added or removed or deleted:
775 raise util.Abort(_('outstanding uncommitted changes'))
775 raise util.Abort(_('outstanding uncommitted changes'))
776
776
777 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
777 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
778 def override_fetch(orig, ui, repo, *pats, **opts):
778 def overridefetch(orig, ui, repo, *pats, **opts):
779 repo.lfstatus = True
779 repo.lfstatus = True
780 modified, added, removed, deleted = repo.status()[:4]
780 modified, added, removed, deleted = repo.status()[:4]
781 repo.lfstatus = False
781 repo.lfstatus = False
782 if modified or added or removed or deleted:
782 if modified or added or removed or deleted:
783 raise util.Abort(_('outstanding uncommitted changes'))
783 raise util.Abort(_('outstanding uncommitted changes'))
784 return orig(ui, repo, *pats, **opts)
784 return orig(ui, repo, *pats, **opts)
785
785
786 def override_forget(orig, ui, repo, *pats, **opts):
786 def overrideforget(orig, ui, repo, *pats, **opts):
787 installnormalfilesmatchfn(repo[None].manifest())
787 installnormalfilesmatchfn(repo[None].manifest())
788 orig(ui, repo, *pats, **opts)
788 orig(ui, repo, *pats, **opts)
789 restorematchfn()
789 restorematchfn()
790 m = scmutil.match(repo[None], pats, opts)
790 m = scmutil.match(repo[None], pats, opts)
791
791
792 try:
792 try:
793 repo.lfstatus = True
793 repo.lfstatus = True
794 s = repo.status(match=m, clean=True)
794 s = repo.status(match=m, clean=True)
795 finally:
795 finally:
796 repo.lfstatus = False
796 repo.lfstatus = False
797 forget = sorted(s[0] + s[1] + s[3] + s[6])
797 forget = sorted(s[0] + s[1] + s[3] + s[6])
798 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
798 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
799
799
800 for f in forget:
800 for f in forget:
801 if lfutil.standin(f) not in repo.dirstate and not \
801 if lfutil.standin(f) not in repo.dirstate and not \
802 os.path.isdir(m.rel(lfutil.standin(f))):
802 os.path.isdir(m.rel(lfutil.standin(f))):
803 ui.warn(_('not removing %s: file is already untracked\n')
803 ui.warn(_('not removing %s: file is already untracked\n')
804 % m.rel(f))
804 % m.rel(f))
805
805
806 for f in forget:
806 for f in forget:
807 if ui.verbose or not m.exact(f):
807 if ui.verbose or not m.exact(f):
808 ui.status(_('removing %s\n') % m.rel(f))
808 ui.status(_('removing %s\n') % m.rel(f))
809
809
810 # Need to lock because standin files are deleted then removed from the
810 # Need to lock because standin files are deleted then removed from the
811 # repository and we could race inbetween.
811 # repository and we could race inbetween.
812 wlock = repo.wlock()
812 wlock = repo.wlock()
813 try:
813 try:
814 lfdirstate = lfutil.openlfdirstate(ui, repo)
814 lfdirstate = lfutil.openlfdirstate(ui, repo)
815 for f in forget:
815 for f in forget:
816 if lfdirstate[f] == 'a':
816 if lfdirstate[f] == 'a':
817 lfdirstate.drop(f)
817 lfdirstate.drop(f)
818 else:
818 else:
819 lfdirstate.remove(f)
819 lfdirstate.remove(f)
820 lfdirstate.write()
820 lfdirstate.write()
821 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
821 lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
822 unlink=True)
822 unlink=True)
823 finally:
823 finally:
824 wlock.release()
824 wlock.release()
825
825
826 def getoutgoinglfiles(ui, repo, dest=None, **opts):
826 def getoutgoinglfiles(ui, repo, dest=None, **opts):
827 dest = ui.expandpath(dest or 'default-push', dest or 'default')
827 dest = ui.expandpath(dest or 'default-push', dest or 'default')
828 dest, branches = hg.parseurl(dest, opts.get('branch'))
828 dest, branches = hg.parseurl(dest, opts.get('branch'))
829 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
829 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
830 if revs:
830 if revs:
831 revs = [repo.lookup(rev) for rev in revs]
831 revs = [repo.lookup(rev) for rev in revs]
832
832
833 remoteui = hg.remoteui
833 remoteui = hg.remoteui
834
834
835 try:
835 try:
836 remote = hg.repository(remoteui(repo, opts), dest)
836 remote = hg.repository(remoteui(repo, opts), dest)
837 except error.RepoError:
837 except error.RepoError:
838 return None
838 return None
839 o = lfutil.findoutgoing(repo, remote, False)
839 o = lfutil.findoutgoing(repo, remote, False)
840 if not o:
840 if not o:
841 return None
841 return None
842 o = repo.changelog.nodesbetween(o, revs)[0]
842 o = repo.changelog.nodesbetween(o, revs)[0]
843 if opts.get('newest_first'):
843 if opts.get('newest_first'):
844 o.reverse()
844 o.reverse()
845
845
846 toupload = set()
846 toupload = set()
847 for n in o:
847 for n in o:
848 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
848 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
849 ctx = repo[n]
849 ctx = repo[n]
850 files = set(ctx.files())
850 files = set(ctx.files())
851 if len(parents) == 2:
851 if len(parents) == 2:
852 mc = ctx.manifest()
852 mc = ctx.manifest()
853 mp1 = ctx.parents()[0].manifest()
853 mp1 = ctx.parents()[0].manifest()
854 mp2 = ctx.parents()[1].manifest()
854 mp2 = ctx.parents()[1].manifest()
855 for f in mp1:
855 for f in mp1:
856 if f not in mc:
856 if f not in mc:
857 files.add(f)
857 files.add(f)
858 for f in mp2:
858 for f in mp2:
859 if f not in mc:
859 if f not in mc:
860 files.add(f)
860 files.add(f)
861 for f in mc:
861 for f in mc:
862 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
862 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
863 files.add(f)
863 files.add(f)
864 toupload = toupload.union(
864 toupload = toupload.union(
865 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
865 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
866 return toupload
866 return toupload
867
867
868 def override_outgoing(orig, ui, repo, dest=None, **opts):
868 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
869 orig(ui, repo, dest, **opts)
869 orig(ui, repo, dest, **opts)
870
870
871 if opts.pop('large', None):
871 if opts.pop('large', None):
872 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
872 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
873 if toupload is None:
873 if toupload is None:
874 ui.status(_('largefiles: No remote repo\n'))
874 ui.status(_('largefiles: No remote repo\n'))
875 else:
875 else:
876 ui.status(_('largefiles to upload:\n'))
876 ui.status(_('largefiles to upload:\n'))
877 for file in toupload:
877 for file in toupload:
878 ui.status(lfutil.splitstandin(file) + '\n')
878 ui.status(lfutil.splitstandin(file) + '\n')
879 ui.status('\n')
879 ui.status('\n')
880
880
881 def override_summary(orig, ui, repo, *pats, **opts):
881 def overridesummary(orig, ui, repo, *pats, **opts):
882 try:
882 try:
883 repo.lfstatus = True
883 repo.lfstatus = True
884 orig(ui, repo, *pats, **opts)
884 orig(ui, repo, *pats, **opts)
885 finally:
885 finally:
886 repo.lfstatus = False
886 repo.lfstatus = False
887
887
888 if opts.pop('large', None):
888 if opts.pop('large', None):
889 toupload = getoutgoinglfiles(ui, repo, None, **opts)
889 toupload = getoutgoinglfiles(ui, repo, None, **opts)
890 if toupload is None:
890 if toupload is None:
891 ui.status(_('largefiles: No remote repo\n'))
891 ui.status(_('largefiles: No remote repo\n'))
892 else:
892 else:
893 ui.status(_('largefiles: %d to upload\n') % len(toupload))
893 ui.status(_('largefiles: %d to upload\n') % len(toupload))
894
894
895 def override_addremove(orig, ui, repo, *pats, **opts):
895 def overrideaddremove(orig, ui, repo, *pats, **opts):
896 # Get the list of missing largefiles so we can remove them
896 # Get the list of missing largefiles so we can remove them
897 lfdirstate = lfutil.openlfdirstate(ui, repo)
897 lfdirstate = lfutil.openlfdirstate(ui, repo)
898 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
898 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
899 False, False)
899 False, False)
900 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
900 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
901
901
902 # Call into the normal remove code, but the removing of the standin, we want
902 # Call into the normal remove code, but the removing of the standin, we want
903 # to have handled by original addremove. Monkey patching here makes sure
903 # to have handled by original addremove. Monkey patching here makes sure
904 # we don't remove the standin in the largefiles code, preventing a very
904 # we don't remove the standin in the largefiles code, preventing a very
905 # confused state later.
905 # confused state later.
906 if missing:
906 if missing:
907 repo._isaddremove = True
907 repo._isaddremove = True
908 remove_largefiles(ui, repo, *missing, **opts)
908 removelargefiles(ui, repo, *missing, **opts)
909 repo._isaddremove = False
909 repo._isaddremove = False
910 # Call into the normal add code, and any files that *should* be added as
910 # Call into the normal add code, and any files that *should* be added as
911 # largefiles will be
911 # largefiles will be
912 add_largefiles(ui, repo, *pats, **opts)
912 addlargefiles(ui, repo, *pats, **opts)
913 # Now that we've handled largefiles, hand off to the original addremove
913 # Now that we've handled largefiles, hand off to the original addremove
914 # function to take care of the rest. Make sure it doesn't do anything with
914 # function to take care of the rest. Make sure it doesn't do anything with
915 # largefiles by installing a matcher that will ignore them.
915 # largefiles by installing a matcher that will ignore them.
916 installnormalfilesmatchfn(repo[None].manifest())
916 installnormalfilesmatchfn(repo[None].manifest())
917 result = orig(ui, repo, *pats, **opts)
917 result = orig(ui, repo, *pats, **opts)
918 restorematchfn()
918 restorematchfn()
919 return result
919 return result
920
920
921 # Calling purge with --all will cause the largefiles to be deleted.
921 # Calling purge with --all will cause the largefiles to be deleted.
922 # Override repo.status to prevent this from happening.
922 # Override repo.status to prevent this from happening.
923 def override_purge(orig, ui, repo, *dirs, **opts):
923 def overridepurge(orig, ui, repo, *dirs, **opts):
924 oldstatus = repo.status
924 oldstatus = repo.status
925 def override_status(node1='.', node2=None, match=None, ignored=False,
925 def overridestatus(node1='.', node2=None, match=None, ignored=False,
926 clean=False, unknown=False, listsubrepos=False):
926 clean=False, unknown=False, listsubrepos=False):
927 r = oldstatus(node1, node2, match, ignored, clean, unknown,
927 r = oldstatus(node1, node2, match, ignored, clean, unknown,
928 listsubrepos)
928 listsubrepos)
929 lfdirstate = lfutil.openlfdirstate(ui, repo)
929 lfdirstate = lfutil.openlfdirstate(ui, repo)
930 modified, added, removed, deleted, unknown, ignored, clean = r
930 modified, added, removed, deleted, unknown, ignored, clean = r
931 unknown = [f for f in unknown if lfdirstate[f] == '?']
931 unknown = [f for f in unknown if lfdirstate[f] == '?']
932 ignored = [f for f in ignored if lfdirstate[f] == '?']
932 ignored = [f for f in ignored if lfdirstate[f] == '?']
933 return modified, added, removed, deleted, unknown, ignored, clean
933 return modified, added, removed, deleted, unknown, ignored, clean
934 repo.status = override_status
934 repo.status = overridestatus
935 orig(ui, repo, *dirs, **opts)
935 orig(ui, repo, *dirs, **opts)
936 repo.status = oldstatus
936 repo.status = oldstatus
937
937
938 def override_rollback(orig, ui, repo, **opts):
938 def overriderollback(orig, ui, repo, **opts):
939 result = orig(ui, repo, **opts)
939 result = orig(ui, repo, **opts)
940 merge.update(repo, node=None, branchmerge=False, force=True,
940 merge.update(repo, node=None, branchmerge=False, force=True,
941 partial=lfutil.isstandin)
941 partial=lfutil.isstandin)
942 wlock = repo.wlock()
942 wlock = repo.wlock()
943 try:
943 try:
944 lfdirstate = lfutil.openlfdirstate(ui, repo)
944 lfdirstate = lfutil.openlfdirstate(ui, repo)
945 lfiles = lfutil.listlfiles(repo)
945 lfiles = lfutil.listlfiles(repo)
946 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
946 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
947 for file in lfiles:
947 for file in lfiles:
948 if file in oldlfiles:
948 if file in oldlfiles:
949 lfdirstate.normallookup(file)
949 lfdirstate.normallookup(file)
950 else:
950 else:
951 lfdirstate.add(file)
951 lfdirstate.add(file)
952 lfdirstate.write()
952 lfdirstate.write()
953 finally:
953 finally:
954 wlock.release()
954 wlock.release()
955 return result
955 return result
956
956
957 def override_transplant(orig, ui, repo, *revs, **opts):
957 def overridetransplant(orig, ui, repo, *revs, **opts):
958 try:
958 try:
959 oldstandins = lfutil.getstandinsstate(repo)
959 oldstandins = lfutil.getstandinsstate(repo)
960 repo._istransplanting = True
960 repo._istransplanting = True
961 result = orig(ui, repo, *revs, **opts)
961 result = orig(ui, repo, *revs, **opts)
962 newstandins = lfutil.getstandinsstate(repo)
962 newstandins = lfutil.getstandinsstate(repo)
963 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
963 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
964 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
964 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
965 printmessage=True)
965 printmessage=True)
966 finally:
966 finally:
967 repo._istransplanting = False
967 repo._istransplanting = False
968 return result
968 return result
@@ -1,166 +1,166 b''
1 # Copyright 2011 Fog Creek Software
1 # Copyright 2011 Fog Creek Software
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 import os
6 import os
7 import urllib2
7 import urllib2
8
8
9 from mercurial import error, httprepo, util, wireproto
9 from mercurial import error, httprepo, util, wireproto
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11
11
12 import lfutil
12 import lfutil
13
13
14 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
14 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
15 '\n\nPlease enable it in your Mercurial config '
15 '\n\nPlease enable it in your Mercurial config '
16 'file.\n')
16 'file.\n')
17
17
18 def putlfile(repo, proto, sha):
18 def putlfile(repo, proto, sha):
19 '''Put a largefile into a repository's local store and into the
19 '''Put a largefile into a repository's local store and into the
20 user cache.'''
20 user cache.'''
21 proto.redirect()
21 proto.redirect()
22
22
23 tmpfp = util.atomictempfile(lfutil.storepath(repo, sha),
23 tmpfp = util.atomictempfile(lfutil.storepath(repo, sha),
24 createmode=repo.store.createmode)
24 createmode=repo.store.createmode)
25 try:
25 try:
26 try:
26 try:
27 proto.getfile(tmpfp)
27 proto.getfile(tmpfp)
28 tmpfp._fp.seek(0)
28 tmpfp._fp.seek(0)
29 if sha != lfutil.hexsha1(tmpfp._fp):
29 if sha != lfutil.hexsha1(tmpfp._fp):
30 raise IOError(0, _('largefile contents do not match hash'))
30 raise IOError(0, _('largefile contents do not match hash'))
31 tmpfp.close()
31 tmpfp.close()
32 lfutil.linktousercache(repo, sha)
32 lfutil.linktousercache(repo, sha)
33 except IOError, e:
33 except IOError, e:
34 repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
34 repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
35 (sha, e.strerror))
35 (sha, e.strerror))
36 return wireproto.pushres(1)
36 return wireproto.pushres(1)
37 finally:
37 finally:
38 tmpfp.discard()
38 tmpfp.discard()
39
39
40 return wireproto.pushres(0)
40 return wireproto.pushres(0)
41
41
42 def getlfile(repo, proto, sha):
42 def getlfile(repo, proto, sha):
43 '''Retrieve a largefile from the repository-local cache or system
43 '''Retrieve a largefile from the repository-local cache or system
44 cache.'''
44 cache.'''
45 filename = lfutil.findfile(repo, sha)
45 filename = lfutil.findfile(repo, sha)
46 if not filename:
46 if not filename:
47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
48 f = open(filename, 'rb')
48 f = open(filename, 'rb')
49 length = os.fstat(f.fileno())[6]
49 length = os.fstat(f.fileno())[6]
50
50
51 # Since we can't set an HTTP content-length header here, and
51 # Since we can't set an HTTP content-length header here, and
52 # Mercurial core provides no way to give the length of a streamres
52 # Mercurial core provides no way to give the length of a streamres
53 # (and reading the entire file into RAM would be ill-advised), we
53 # (and reading the entire file into RAM would be ill-advised), we
54 # just send the length on the first line of the response, like the
54 # just send the length on the first line of the response, like the
55 # ssh proto does for string responses.
55 # ssh proto does for string responses.
56 def generator():
56 def generator():
57 yield '%d\n' % length
57 yield '%d\n' % length
58 for chunk in f:
58 for chunk in f:
59 yield chunk
59 yield chunk
60 return wireproto.streamres(generator())
60 return wireproto.streamres(generator())
61
61
62 def statlfile(repo, proto, sha):
62 def statlfile(repo, proto, sha):
63 '''Return '2\n' if the largefile is missing, '1\n' if it has a
63 '''Return '2\n' if the largefile is missing, '1\n' if it has a
64 mismatched checksum, or '0\n' if it is in good condition'''
64 mismatched checksum, or '0\n' if it is in good condition'''
65 filename = lfutil.findfile(repo, sha)
65 filename = lfutil.findfile(repo, sha)
66 if not filename:
66 if not filename:
67 return '2\n'
67 return '2\n'
68 fd = None
68 fd = None
69 try:
69 try:
70 fd = open(filename, 'rb')
70 fd = open(filename, 'rb')
71 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
71 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
72 finally:
72 finally:
73 if fd:
73 if fd:
74 fd.close()
74 fd.close()
75
75
76 def wirereposetup(ui, repo):
76 def wirereposetup(ui, repo):
77 class lfileswirerepository(repo.__class__):
77 class lfileswirerepository(repo.__class__):
78 def putlfile(self, sha, fd):
78 def putlfile(self, sha, fd):
79 # unfortunately, httprepository._callpush tries to convert its
79 # unfortunately, httprepository._callpush tries to convert its
80 # input file-like into a bundle before sending it, so we can't use
80 # input file-like into a bundle before sending it, so we can't use
81 # it ...
81 # it ...
82 if issubclass(self.__class__, httprepo.httprepository):
82 if issubclass(self.__class__, httprepo.httprepository):
83 res = None
83 res = None
84 try:
84 try:
85 res = self._call('putlfile', data=fd, sha=sha,
85 res = self._call('putlfile', data=fd, sha=sha,
86 headers={'content-type':'application/mercurial-0.1'})
86 headers={'content-type':'application/mercurial-0.1'})
87 d, output = res.split('\n', 1)
87 d, output = res.split('\n', 1)
88 for l in output.splitlines(True):
88 for l in output.splitlines(True):
89 self.ui.warn(_('remote: '), l, '\n')
89 self.ui.warn(_('remote: '), l, '\n')
90 return int(d)
90 return int(d)
91 except (ValueError, urllib2.HTTPError):
91 except (ValueError, urllib2.HTTPError):
92 self.ui.warn(_('unexpected putlfile response: %s') % res)
92 self.ui.warn(_('unexpected putlfile response: %s') % res)
93 return 1
93 return 1
94 # ... but we can't use sshrepository._call because the data=
94 # ... but we can't use sshrepository._call because the data=
95 # argument won't get sent, and _callpush does exactly what we want
95 # argument won't get sent, and _callpush does exactly what we want
96 # in this case: send the data straight through
96 # in this case: send the data straight through
97 else:
97 else:
98 try:
98 try:
99 ret, output = self._callpush("putlfile", fd, sha=sha)
99 ret, output = self._callpush("putlfile", fd, sha=sha)
100 if ret == "":
100 if ret == "":
101 raise error.ResponseError(_('putlfile failed:'),
101 raise error.ResponseError(_('putlfile failed:'),
102 output)
102 output)
103 return int(ret)
103 return int(ret)
104 except IOError:
104 except IOError:
105 return 1
105 return 1
106 except ValueError:
106 except ValueError:
107 raise error.ResponseError(
107 raise error.ResponseError(
108 _('putlfile failed (unexpected response):'), ret)
108 _('putlfile failed (unexpected response):'), ret)
109
109
110 def getlfile(self, sha):
110 def getlfile(self, sha):
111 stream = self._callstream("getlfile", sha=sha)
111 stream = self._callstream("getlfile", sha=sha)
112 length = stream.readline()
112 length = stream.readline()
113 try:
113 try:
114 length = int(length)
114 length = int(length)
115 except ValueError:
115 except ValueError:
116 self._abort(error.ResponseError(_("unexpected response:"),
116 self._abort(error.ResponseError(_("unexpected response:"),
117 length))
117 length))
118 return (length, stream)
118 return (length, stream)
119
119
120 def statlfile(self, sha):
120 def statlfile(self, sha):
121 try:
121 try:
122 return int(self._call("statlfile", sha=sha))
122 return int(self._call("statlfile", sha=sha))
123 except (ValueError, urllib2.HTTPError):
123 except (ValueError, urllib2.HTTPError):
124 # If the server returns anything but an integer followed by a
124 # If the server returns anything but an integer followed by a
125 # newline, newline, it's not speaking our language; if we get
125 # newline, newline, it's not speaking our language; if we get
126 # an HTTP error, we can't be sure the largefile is present;
126 # an HTTP error, we can't be sure the largefile is present;
127 # either way, consider it missing.
127 # either way, consider it missing.
128 return 2
128 return 2
129
129
130 repo.__class__ = lfileswirerepository
130 repo.__class__ = lfileswirerepository
131
131
132 # advertise the largefiles=serve capability
132 # advertise the largefiles=serve capability
133 def capabilities(repo, proto):
133 def capabilities(repo, proto):
134 return capabilities_orig(repo, proto) + ' largefiles=serve'
134 return capabilitiesorig(repo, proto) + ' largefiles=serve'
135
135
136 # duplicate what Mercurial's new out-of-band errors mechanism does, because
136 # duplicate what Mercurial's new out-of-band errors mechanism does, because
137 # clients old and new alike both handle it well
137 # clients old and new alike both handle it well
138 def webproto_refuseclient(self, message):
138 def webprotorefuseclient(self, message):
139 self.req.header([('Content-Type', 'application/hg-error')])
139 self.req.header([('Content-Type', 'application/hg-error')])
140 return message
140 return message
141
141
142 def sshproto_refuseclient(self, message):
142 def sshprotorefuseclient(self, message):
143 self.ui.write_err('%s\n-\n' % message)
143 self.ui.write_err('%s\n-\n' % message)
144 self.fout.write('\n')
144 self.fout.write('\n')
145 self.fout.flush()
145 self.fout.flush()
146
146
147 return ''
147 return ''
148
148
149 def heads(repo, proto):
149 def heads(repo, proto):
150 if lfutil.islfilesrepo(repo):
150 if lfutil.islfilesrepo(repo):
151 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
151 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
152 return wireproto.heads(repo, proto)
152 return wireproto.heads(repo, proto)
153
153
154 def sshrepo_callstream(self, cmd, **args):
154 def sshrepocallstream(self, cmd, **args):
155 if cmd == 'heads' and self.capable('largefiles'):
155 if cmd == 'heads' and self.capable('largefiles'):
156 cmd = 'lheads'
156 cmd = 'lheads'
157 if cmd == 'batch' and self.capable('largefiles'):
157 if cmd == 'batch' and self.capable('largefiles'):
158 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
158 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
159 return ssh_oldcallstream(self, cmd, **args)
159 return ssholdcallstream(self, cmd, **args)
160
160
161 def httprepo_callstream(self, cmd, **args):
161 def httprepocallstream(self, cmd, **args):
162 if cmd == 'heads' and self.capable('largefiles'):
162 if cmd == 'heads' and self.capable('largefiles'):
163 cmd = 'lheads'
163 cmd = 'lheads'
164 if cmd == 'batch' and self.capable('largefiles'):
164 if cmd == 'batch' and self.capable('largefiles'):
165 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
165 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
166 return http_oldcallstream(self, cmd, **args)
166 return httpoldcallstream(self, cmd, **args)
@@ -1,458 +1,458 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13
13
14 from mercurial import context, error, manifest, match as match_, util
14 from mercurial import context, error, manifest, match as match_, util
15 from mercurial import node as node_
15 from mercurial import node as node_
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfcommands
18 import lfcommands
19 import proto
19 import proto
20 import lfutil
20 import lfutil
21
21
22 def reposetup(ui, repo):
22 def reposetup(ui, repo):
23 # wire repositories should be given new wireproto functions but not the
23 # wire repositories should be given new wireproto functions but not the
24 # other largefiles modifications
24 # other largefiles modifications
25 if not repo.local():
25 if not repo.local():
26 return proto.wirereposetup(ui, repo)
26 return proto.wirereposetup(ui, repo)
27
27
28 for name in ('status', 'commitctx', 'commit', 'push'):
28 for name in ('status', 'commitctx', 'commit', 'push'):
29 method = getattr(repo, name)
29 method = getattr(repo, name)
30 if (isinstance(method, types.FunctionType) and
30 if (isinstance(method, types.FunctionType) and
31 method.func_name == 'wrap'):
31 method.func_name == 'wrap'):
32 ui.warn(_('largefiles: repo method %r appears to have already been'
32 ui.warn(_('largefiles: repo method %r appears to have already been'
33 ' wrapped by another extension: '
33 ' wrapped by another extension: '
34 'largefiles may behave incorrectly\n')
34 'largefiles may behave incorrectly\n')
35 % name)
35 % name)
36
36
37 class lfiles_repo(repo.__class__):
37 class lfilesrepo(repo.__class__):
38 lfstatus = False
38 lfstatus = False
39 def status_nolfiles(self, *args, **kwargs):
39 def status_nolfiles(self, *args, **kwargs):
40 return super(lfiles_repo, self).status(*args, **kwargs)
40 return super(lfilesrepo, self).status(*args, **kwargs)
41
41
42 # When lfstatus is set, return a context that gives the names
42 # When lfstatus is set, return a context that gives the names
43 # of largefiles instead of their corresponding standins and
43 # of largefiles instead of their corresponding standins and
44 # identifies the largefiles as always binary, regardless of
44 # identifies the largefiles as always binary, regardless of
45 # their actual contents.
45 # their actual contents.
46 def __getitem__(self, changeid):
46 def __getitem__(self, changeid):
47 ctx = super(lfiles_repo, self).__getitem__(changeid)
47 ctx = super(lfilesrepo, self).__getitem__(changeid)
48 if self.lfstatus:
48 if self.lfstatus:
49 class lfiles_manifestdict(manifest.manifestdict):
49 class lfilesmanifestdict(manifest.manifestdict):
50 def __contains__(self, filename):
50 def __contains__(self, filename):
51 if super(lfiles_manifestdict,
51 if super(lfilesmanifestdict,
52 self).__contains__(filename):
52 self).__contains__(filename):
53 return True
53 return True
54 return super(lfiles_manifestdict,
54 return super(lfilesmanifestdict,
55 self).__contains__(lfutil.standin(filename))
55 self).__contains__(lfutil.standin(filename))
56 class lfiles_ctx(ctx.__class__):
56 class lfilesctx(ctx.__class__):
57 def files(self):
57 def files(self):
58 filenames = super(lfiles_ctx, self).files()
58 filenames = super(lfilesctx, self).files()
59 return [lfutil.splitstandin(f) or f for f in filenames]
59 return [lfutil.splitstandin(f) or f for f in filenames]
60 def manifest(self):
60 def manifest(self):
61 man1 = super(lfiles_ctx, self).manifest()
61 man1 = super(lfilesctx, self).manifest()
62 man1.__class__ = lfiles_manifestdict
62 man1.__class__ = lfilesmanifestdict
63 return man1
63 return man1
64 def filectx(self, path, fileid=None, filelog=None):
64 def filectx(self, path, fileid=None, filelog=None):
65 try:
65 try:
66 if filelog is not None:
66 if filelog is not None:
67 result = super(lfiles_ctx, self).filectx(
67 result = super(lfilesctx, self).filectx(
68 path, fileid, filelog)
68 path, fileid, filelog)
69 else:
69 else:
70 result = super(lfiles_ctx, self).filectx(
70 result = super(lfilesctx, self).filectx(
71 path, fileid)
71 path, fileid)
72 except error.LookupError:
72 except error.LookupError:
73 # Adding a null character will cause Mercurial to
73 # Adding a null character will cause Mercurial to
74 # identify this as a binary file.
74 # identify this as a binary file.
75 if filelog is not None:
75 if filelog is not None:
76 result = super(lfiles_ctx, self).filectx(
76 result = super(lfilesctx, self).filectx(
77 lfutil.standin(path), fileid, filelog)
77 lfutil.standin(path), fileid, filelog)
78 else:
78 else:
79 result = super(lfiles_ctx, self).filectx(
79 result = super(lfilesctx, self).filectx(
80 lfutil.standin(path), fileid)
80 lfutil.standin(path), fileid)
81 olddata = result.data
81 olddata = result.data
82 result.data = lambda: olddata() + '\0'
82 result.data = lambda: olddata() + '\0'
83 return result
83 return result
84 ctx.__class__ = lfiles_ctx
84 ctx.__class__ = lfilesctx
85 return ctx
85 return ctx
86
86
87 # Figure out the status of big files and insert them into the
87 # Figure out the status of big files and insert them into the
88 # appropriate list in the result. Also removes standin files
88 # appropriate list in the result. Also removes standin files
89 # from the listing. Revert to the original status if
89 # from the listing. Revert to the original status if
90 # self.lfstatus is False.
90 # self.lfstatus is False.
91 def status(self, node1='.', node2=None, match=None, ignored=False,
91 def status(self, node1='.', node2=None, match=None, ignored=False,
92 clean=False, unknown=False, listsubrepos=False):
92 clean=False, unknown=False, listsubrepos=False):
93 listignored, listclean, listunknown = ignored, clean, unknown
93 listignored, listclean, listunknown = ignored, clean, unknown
94 if not self.lfstatus:
94 if not self.lfstatus:
95 return super(lfiles_repo, self).status(node1, node2, match,
95 return super(lfilesrepo, self).status(node1, node2, match,
96 listignored, listclean, listunknown, listsubrepos)
96 listignored, listclean, listunknown, listsubrepos)
97 else:
97 else:
98 # some calls in this function rely on the old version of status
98 # some calls in this function rely on the old version of status
99 self.lfstatus = False
99 self.lfstatus = False
100 if isinstance(node1, context.changectx):
100 if isinstance(node1, context.changectx):
101 ctx1 = node1
101 ctx1 = node1
102 else:
102 else:
103 ctx1 = repo[node1]
103 ctx1 = repo[node1]
104 if isinstance(node2, context.changectx):
104 if isinstance(node2, context.changectx):
105 ctx2 = node2
105 ctx2 = node2
106 else:
106 else:
107 ctx2 = repo[node2]
107 ctx2 = repo[node2]
108 working = ctx2.rev() is None
108 working = ctx2.rev() is None
109 parentworking = working and ctx1 == self['.']
109 parentworking = working and ctx1 == self['.']
110
110
111 def inctx(file, ctx):
111 def inctx(file, ctx):
112 try:
112 try:
113 if ctx.rev() is None:
113 if ctx.rev() is None:
114 return file in ctx.manifest()
114 return file in ctx.manifest()
115 ctx[file]
115 ctx[file]
116 return True
116 return True
117 except KeyError:
117 except KeyError:
118 return False
118 return False
119
119
120 if match is None:
120 if match is None:
121 match = match_.always(self.root, self.getcwd())
121 match = match_.always(self.root, self.getcwd())
122
122
123 # First check if there were files specified on the
123 # First check if there were files specified on the
124 # command line. If there were, and none of them were
124 # command line. If there were, and none of them were
125 # largefiles, we should just bail here and let super
125 # largefiles, we should just bail here and let super
126 # handle it -- thus gaining a big performance boost.
126 # handle it -- thus gaining a big performance boost.
127 lfdirstate = lfutil.openlfdirstate(ui, self)
127 lfdirstate = lfutil.openlfdirstate(ui, self)
128 if match.files() and not match.anypats():
128 if match.files() and not match.anypats():
129 for f in lfdirstate:
129 for f in lfdirstate:
130 if match(f):
130 if match(f):
131 break
131 break
132 else:
132 else:
133 return super(lfiles_repo, self).status(node1, node2,
133 return super(lfilesrepo, self).status(node1, node2,
134 match, listignored, listclean,
134 match, listignored, listclean,
135 listunknown, listsubrepos)
135 listunknown, listsubrepos)
136
136
137 # Create a copy of match that matches standins instead
137 # Create a copy of match that matches standins instead
138 # of largefiles.
138 # of largefiles.
139 def tostandin(file):
139 def tostandin(file):
140 if inctx(lfutil.standin(file), ctx2):
140 if inctx(lfutil.standin(file), ctx2):
141 return lfutil.standin(file)
141 return lfutil.standin(file)
142 return file
142 return file
143
143
144 # Create a function that we can use to override what is
144 # Create a function that we can use to override what is
145 # normally the ignore matcher. We've already checked
145 # normally the ignore matcher. We've already checked
146 # for ignored files on the first dirstate walk, and
146 # for ignored files on the first dirstate walk, and
147 # unecessarily re-checking here causes a huge performance
147 # unecessarily re-checking here causes a huge performance
148 # hit because lfdirstate only knows about largefiles
148 # hit because lfdirstate only knows about largefiles
149 def _ignoreoverride(self):
149 def _ignoreoverride(self):
150 return False
150 return False
151
151
152 m = copy.copy(match)
152 m = copy.copy(match)
153 m._files = [tostandin(f) for f in m._files]
153 m._files = [tostandin(f) for f in m._files]
154
154
155 # Get ignored files here even if we weren't asked for them; we
155 # Get ignored files here even if we weren't asked for them; we
156 # must use the result here for filtering later
156 # must use the result here for filtering later
157 result = super(lfiles_repo, self).status(node1, node2, m,
157 result = super(lfilesrepo, self).status(node1, node2, m,
158 True, clean, unknown, listsubrepos)
158 True, clean, unknown, listsubrepos)
159 if working:
159 if working:
160 try:
160 try:
161 # Any non-largefiles that were explicitly listed must be
161 # Any non-largefiles that were explicitly listed must be
162 # taken out or lfdirstate.status will report an error.
162 # taken out or lfdirstate.status will report an error.
163 # The status of these files was already computed using
163 # The status of these files was already computed using
164 # super's status.
164 # super's status.
165 # Override lfdirstate's ignore matcher to not do
165 # Override lfdirstate's ignore matcher to not do
166 # anything
166 # anything
167 orig_ignore = lfdirstate._ignore
167 origignore = lfdirstate._ignore
168 lfdirstate._ignore = _ignoreoverride
168 lfdirstate._ignore = _ignoreoverride
169
169
170 match._files = [f for f in match._files if f in
170 match._files = [f for f in match._files if f in
171 lfdirstate]
171 lfdirstate]
172 # Don't waste time getting the ignored and unknown
172 # Don't waste time getting the ignored and unknown
173 # files again; we already have them
173 # files again; we already have them
174 s = lfdirstate.status(match, [], False,
174 s = lfdirstate.status(match, [], False,
175 listclean, False)
175 listclean, False)
176 (unsure, modified, added, removed, missing, unknown,
176 (unsure, modified, added, removed, missing, unknown,
177 ignored, clean) = s
177 ignored, clean) = s
178 # Replace the list of ignored and unknown files with
178 # Replace the list of ignored and unknown files with
179 # the previously caclulated lists, and strip out the
179 # the previously caclulated lists, and strip out the
180 # largefiles
180 # largefiles
181 lfiles = set(lfdirstate._map)
181 lfiles = set(lfdirstate._map)
182 ignored = set(result[5]).difference(lfiles)
182 ignored = set(result[5]).difference(lfiles)
183 unknown = set(result[4]).difference(lfiles)
183 unknown = set(result[4]).difference(lfiles)
184 if parentworking:
184 if parentworking:
185 for lfile in unsure:
185 for lfile in unsure:
186 standin = lfutil.standin(lfile)
186 standin = lfutil.standin(lfile)
187 if standin not in ctx1:
187 if standin not in ctx1:
188 # from second parent
188 # from second parent
189 modified.append(lfile)
189 modified.append(lfile)
190 elif ctx1[standin].data().strip() \
190 elif ctx1[standin].data().strip() \
191 != lfutil.hashfile(self.wjoin(lfile)):
191 != lfutil.hashfile(self.wjoin(lfile)):
192 modified.append(lfile)
192 modified.append(lfile)
193 else:
193 else:
194 clean.append(lfile)
194 clean.append(lfile)
195 lfdirstate.normal(lfile)
195 lfdirstate.normal(lfile)
196 else:
196 else:
197 tocheck = unsure + modified + added + clean
197 tocheck = unsure + modified + added + clean
198 modified, added, clean = [], [], []
198 modified, added, clean = [], [], []
199
199
200 for lfile in tocheck:
200 for lfile in tocheck:
201 standin = lfutil.standin(lfile)
201 standin = lfutil.standin(lfile)
202 if inctx(standin, ctx1):
202 if inctx(standin, ctx1):
203 if ctx1[standin].data().strip() != \
203 if ctx1[standin].data().strip() != \
204 lfutil.hashfile(self.wjoin(lfile)):
204 lfutil.hashfile(self.wjoin(lfile)):
205 modified.append(lfile)
205 modified.append(lfile)
206 else:
206 else:
207 clean.append(lfile)
207 clean.append(lfile)
208 else:
208 else:
209 added.append(lfile)
209 added.append(lfile)
210 finally:
210 finally:
211 # Replace the original ignore function
211 # Replace the original ignore function
212 lfdirstate._ignore = orig_ignore
212 lfdirstate._ignore = origignore
213
213
214 for standin in ctx1.manifest():
214 for standin in ctx1.manifest():
215 if not lfutil.isstandin(standin):
215 if not lfutil.isstandin(standin):
216 continue
216 continue
217 lfile = lfutil.splitstandin(standin)
217 lfile = lfutil.splitstandin(standin)
218 if not match(lfile):
218 if not match(lfile):
219 continue
219 continue
220 if lfile not in lfdirstate:
220 if lfile not in lfdirstate:
221 removed.append(lfile)
221 removed.append(lfile)
222
222
223 # Filter result lists
223 # Filter result lists
224 result = list(result)
224 result = list(result)
225
225
226 # Largefiles are not really removed when they're
226 # Largefiles are not really removed when they're
227 # still in the normal dirstate. Likewise, normal
227 # still in the normal dirstate. Likewise, normal
228 # files are not really removed if it's still in
228 # files are not really removed if it's still in
229 # lfdirstate. This happens in merges where files
229 # lfdirstate. This happens in merges where files
230 # change type.
230 # change type.
231 removed = [f for f in removed if f not in repo.dirstate]
231 removed = [f for f in removed if f not in repo.dirstate]
232 result[2] = [f for f in result[2] if f not in lfdirstate]
232 result[2] = [f for f in result[2] if f not in lfdirstate]
233
233
234 # Unknown files
234 # Unknown files
235 unknown = set(unknown).difference(ignored)
235 unknown = set(unknown).difference(ignored)
236 result[4] = [f for f in unknown
236 result[4] = [f for f in unknown
237 if (repo.dirstate[f] == '?' and
237 if (repo.dirstate[f] == '?' and
238 not lfutil.isstandin(f))]
238 not lfutil.isstandin(f))]
239 # Ignored files were calculated earlier by the dirstate,
239 # Ignored files were calculated earlier by the dirstate,
240 # and we already stripped out the largefiles from the list
240 # and we already stripped out the largefiles from the list
241 result[5] = ignored
241 result[5] = ignored
242 # combine normal files and largefiles
242 # combine normal files and largefiles
243 normals = [[fn for fn in filelist
243 normals = [[fn for fn in filelist
244 if not lfutil.isstandin(fn)]
244 if not lfutil.isstandin(fn)]
245 for filelist in result]
245 for filelist in result]
246 lfiles = (modified, added, removed, missing, [], [], clean)
246 lfiles = (modified, added, removed, missing, [], [], clean)
247 result = [sorted(list1 + list2)
247 result = [sorted(list1 + list2)
248 for (list1, list2) in zip(normals, lfiles)]
248 for (list1, list2) in zip(normals, lfiles)]
249 else:
249 else:
250 def toname(f):
250 def toname(f):
251 if lfutil.isstandin(f):
251 if lfutil.isstandin(f):
252 return lfutil.splitstandin(f)
252 return lfutil.splitstandin(f)
253 return f
253 return f
254 result = [[toname(f) for f in items] for items in result]
254 result = [[toname(f) for f in items] for items in result]
255
255
256 if not listunknown:
256 if not listunknown:
257 result[4] = []
257 result[4] = []
258 if not listignored:
258 if not listignored:
259 result[5] = []
259 result[5] = []
260 if not listclean:
260 if not listclean:
261 result[6] = []
261 result[6] = []
262 self.lfstatus = True
262 self.lfstatus = True
263 return result
263 return result
264
264
265 # As part of committing, copy all of the largefiles into the
265 # As part of committing, copy all of the largefiles into the
266 # cache.
266 # cache.
267 def commitctx(self, *args, **kwargs):
267 def commitctx(self, *args, **kwargs):
268 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
268 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
269 lfutil.copyalltostore(self, node)
269 lfutil.copyalltostore(self, node)
270 return node
270 return node
271
271
272 # Before commit, largefile standins have not had their
272 # Before commit, largefile standins have not had their
273 # contents updated to reflect the hash of their largefile.
273 # contents updated to reflect the hash of their largefile.
274 # Do that here.
274 # Do that here.
275 def commit(self, text="", user=None, date=None, match=None,
275 def commit(self, text="", user=None, date=None, match=None,
276 force=False, editor=False, extra={}):
276 force=False, editor=False, extra={}):
277 orig = super(lfiles_repo, self).commit
277 orig = super(lfilesrepo, self).commit
278
278
279 wlock = repo.wlock()
279 wlock = repo.wlock()
280 try:
280 try:
281 # Case 0: Rebase or Transplant
281 # Case 0: Rebase or Transplant
282 # We have to take the time to pull down the new largefiles now.
282 # We have to take the time to pull down the new largefiles now.
283 # Otherwise, any largefiles that were modified in the
283 # Otherwise, any largefiles that were modified in the
284 # destination changesets get overwritten, either by the rebase
284 # destination changesets get overwritten, either by the rebase
285 # or in the first commit after the rebase or transplant.
285 # or in the first commit after the rebase or transplant.
286 # updatelfiles will update the dirstate to mark any pulled
286 # updatelfiles will update the dirstate to mark any pulled
287 # largefiles as modified
287 # largefiles as modified
288 if getattr(repo, "_isrebasing", False) or \
288 if getattr(repo, "_isrebasing", False) or \
289 getattr(repo, "_istransplanting", False):
289 getattr(repo, "_istransplanting", False):
290 lfcommands.updatelfiles(repo.ui, repo, filelist=None,
290 lfcommands.updatelfiles(repo.ui, repo, filelist=None,
291 printmessage=False)
291 printmessage=False)
292 result = orig(text=text, user=user, date=date, match=match,
292 result = orig(text=text, user=user, date=date, match=match,
293 force=force, editor=editor, extra=extra)
293 force=force, editor=editor, extra=extra)
294 return result
294 return result
295 # Case 1: user calls commit with no specific files or
295 # Case 1: user calls commit with no specific files or
296 # include/exclude patterns: refresh and commit all files that
296 # include/exclude patterns: refresh and commit all files that
297 # are "dirty".
297 # are "dirty".
298 if ((match is None) or
298 if ((match is None) or
299 (not match.anypats() and not match.files())):
299 (not match.anypats() and not match.files())):
300 # Spend a bit of time here to get a list of files we know
300 # Spend a bit of time here to get a list of files we know
301 # are modified so we can compare only against those.
301 # are modified so we can compare only against those.
302 # It can cost a lot of time (several seconds)
302 # It can cost a lot of time (several seconds)
303 # otherwise to update all standins if the largefiles are
303 # otherwise to update all standins if the largefiles are
304 # large.
304 # large.
305 lfdirstate = lfutil.openlfdirstate(ui, self)
305 lfdirstate = lfutil.openlfdirstate(ui, self)
306 dirtymatch = match_.always(repo.root, repo.getcwd())
306 dirtymatch = match_.always(repo.root, repo.getcwd())
307 s = lfdirstate.status(dirtymatch, [], False, False, False)
307 s = lfdirstate.status(dirtymatch, [], False, False, False)
308 modifiedfiles = []
308 modifiedfiles = []
309 for i in s:
309 for i in s:
310 modifiedfiles.extend(i)
310 modifiedfiles.extend(i)
311 lfiles = lfutil.listlfiles(self)
311 lfiles = lfutil.listlfiles(self)
312 # this only loops through largefiles that exist (not
312 # this only loops through largefiles that exist (not
313 # removed/renamed)
313 # removed/renamed)
314 for lfile in lfiles:
314 for lfile in lfiles:
315 if lfile in modifiedfiles:
315 if lfile in modifiedfiles:
316 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
316 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
317 # this handles the case where a rebase is being
317 # this handles the case where a rebase is being
318 # performed and the working copy is not updated
318 # performed and the working copy is not updated
319 # yet.
319 # yet.
320 if os.path.exists(self.wjoin(lfile)):
320 if os.path.exists(self.wjoin(lfile)):
321 lfutil.updatestandin(self,
321 lfutil.updatestandin(self,
322 lfutil.standin(lfile))
322 lfutil.standin(lfile))
323 lfdirstate.normal(lfile)
323 lfdirstate.normal(lfile)
324 for lfile in lfdirstate:
324 for lfile in lfdirstate:
325 if lfile in modifiedfiles:
325 if lfile in modifiedfiles:
326 if not os.path.exists(
326 if not os.path.exists(
327 repo.wjoin(lfutil.standin(lfile))):
327 repo.wjoin(lfutil.standin(lfile))):
328 lfdirstate.drop(lfile)
328 lfdirstate.drop(lfile)
329
329
330 result = orig(text=text, user=user, date=date, match=match,
330 result = orig(text=text, user=user, date=date, match=match,
331 force=force, editor=editor, extra=extra)
331 force=force, editor=editor, extra=extra)
332 # This needs to be after commit; otherwise precommit hooks
332 # This needs to be after commit; otherwise precommit hooks
333 # get the wrong status
333 # get the wrong status
334 lfdirstate.write()
334 lfdirstate.write()
335 return result
335 return result
336
336
337 for f in match.files():
337 for f in match.files():
338 if lfutil.isstandin(f):
338 if lfutil.isstandin(f):
339 raise util.Abort(
339 raise util.Abort(
340 _('file "%s" is a largefile standin') % f,
340 _('file "%s" is a largefile standin') % f,
341 hint=('commit the largefile itself instead'))
341 hint=('commit the largefile itself instead'))
342
342
343 # Case 2: user calls commit with specified patterns: refresh
343 # Case 2: user calls commit with specified patterns: refresh
344 # any matching big files.
344 # any matching big files.
345 smatcher = lfutil.composestandinmatcher(self, match)
345 smatcher = lfutil.composestandinmatcher(self, match)
346 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
346 standins = lfutil.dirstatewalk(self.dirstate, smatcher)
347
347
348 # No matching big files: get out of the way and pass control to
348 # No matching big files: get out of the way and pass control to
349 # the usual commit() method.
349 # the usual commit() method.
350 if not standins:
350 if not standins:
351 return orig(text=text, user=user, date=date, match=match,
351 return orig(text=text, user=user, date=date, match=match,
352 force=force, editor=editor, extra=extra)
352 force=force, editor=editor, extra=extra)
353
353
354 # Refresh all matching big files. It's possible that the
354 # Refresh all matching big files. It's possible that the
355 # commit will end up failing, in which case the big files will
355 # commit will end up failing, in which case the big files will
356 # stay refreshed. No harm done: the user modified them and
356 # stay refreshed. No harm done: the user modified them and
357 # asked to commit them, so sooner or later we're going to
357 # asked to commit them, so sooner or later we're going to
358 # refresh the standins. Might as well leave them refreshed.
358 # refresh the standins. Might as well leave them refreshed.
359 lfdirstate = lfutil.openlfdirstate(ui, self)
359 lfdirstate = lfutil.openlfdirstate(ui, self)
360 for standin in standins:
360 for standin in standins:
361 lfile = lfutil.splitstandin(standin)
361 lfile = lfutil.splitstandin(standin)
362 if lfdirstate[lfile] <> 'r':
362 if lfdirstate[lfile] <> 'r':
363 lfutil.updatestandin(self, standin)
363 lfutil.updatestandin(self, standin)
364 lfdirstate.normal(lfile)
364 lfdirstate.normal(lfile)
365 else:
365 else:
366 lfdirstate.drop(lfile)
366 lfdirstate.drop(lfile)
367
367
368 # Cook up a new matcher that only matches regular files or
368 # Cook up a new matcher that only matches regular files or
369 # standins corresponding to the big files requested by the
369 # standins corresponding to the big files requested by the
370 # user. Have to modify _files to prevent commit() from
370 # user. Have to modify _files to prevent commit() from
371 # complaining "not tracked" for big files.
371 # complaining "not tracked" for big files.
372 lfiles = lfutil.listlfiles(repo)
372 lfiles = lfutil.listlfiles(repo)
373 match = copy.copy(match)
373 match = copy.copy(match)
374 orig_matchfn = match.matchfn
374 origmatchfn = match.matchfn
375
375
376 # Check both the list of largefiles and the list of
376 # Check both the list of largefiles and the list of
377 # standins because if a largefile was removed, it
377 # standins because if a largefile was removed, it
378 # won't be in the list of largefiles at this point
378 # won't be in the list of largefiles at this point
379 match._files += sorted(standins)
379 match._files += sorted(standins)
380
380
381 actualfiles = []
381 actualfiles = []
382 for f in match._files:
382 for f in match._files:
383 fstandin = lfutil.standin(f)
383 fstandin = lfutil.standin(f)
384
384
385 # ignore known largefiles and standins
385 # ignore known largefiles and standins
386 if f in lfiles or fstandin in standins:
386 if f in lfiles or fstandin in standins:
387 continue
387 continue
388
388
389 # append directory separator to avoid collisions
389 # append directory separator to avoid collisions
390 if not fstandin.endswith(os.sep):
390 if not fstandin.endswith(os.sep):
391 fstandin += os.sep
391 fstandin += os.sep
392
392
393 # prevalidate matching standin directories
393 # prevalidate matching standin directories
394 if util.any(st for st in match._files
394 if util.any(st for st in match._files
395 if st.startswith(fstandin)):
395 if st.startswith(fstandin)):
396 continue
396 continue
397 actualfiles.append(f)
397 actualfiles.append(f)
398 match._files = actualfiles
398 match._files = actualfiles
399
399
400 def matchfn(f):
400 def matchfn(f):
401 if orig_matchfn(f):
401 if origmatchfn(f):
402 return f not in lfiles
402 return f not in lfiles
403 else:
403 else:
404 return f in standins
404 return f in standins
405
405
406 match.matchfn = matchfn
406 match.matchfn = matchfn
407 result = orig(text=text, user=user, date=date, match=match,
407 result = orig(text=text, user=user, date=date, match=match,
408 force=force, editor=editor, extra=extra)
408 force=force, editor=editor, extra=extra)
409 # This needs to be after commit; otherwise precommit hooks
409 # This needs to be after commit; otherwise precommit hooks
410 # get the wrong status
410 # get the wrong status
411 lfdirstate.write()
411 lfdirstate.write()
412 return result
412 return result
413 finally:
413 finally:
414 wlock.release()
414 wlock.release()
415
415
416 def push(self, remote, force=False, revs=None, newbranch=False):
416 def push(self, remote, force=False, revs=None, newbranch=False):
417 o = lfutil.findoutgoing(repo, remote, force)
417 o = lfutil.findoutgoing(repo, remote, force)
418 if o:
418 if o:
419 toupload = set()
419 toupload = set()
420 o = repo.changelog.nodesbetween(o, revs)[0]
420 o = repo.changelog.nodesbetween(o, revs)[0]
421 for n in o:
421 for n in o:
422 parents = [p for p in repo.changelog.parents(n)
422 parents = [p for p in repo.changelog.parents(n)
423 if p != node_.nullid]
423 if p != node_.nullid]
424 ctx = repo[n]
424 ctx = repo[n]
425 files = set(ctx.files())
425 files = set(ctx.files())
426 if len(parents) == 2:
426 if len(parents) == 2:
427 mc = ctx.manifest()
427 mc = ctx.manifest()
428 mp1 = ctx.parents()[0].manifest()
428 mp1 = ctx.parents()[0].manifest()
429 mp2 = ctx.parents()[1].manifest()
429 mp2 = ctx.parents()[1].manifest()
430 for f in mp1:
430 for f in mp1:
431 if f not in mc:
431 if f not in mc:
432 files.add(f)
432 files.add(f)
433 for f in mp2:
433 for f in mp2:
434 if f not in mc:
434 if f not in mc:
435 files.add(f)
435 files.add(f)
436 for f in mc:
436 for f in mc:
437 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
437 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
438 None):
438 None):
439 files.add(f)
439 files.add(f)
440
440
441 toupload = toupload.union(
441 toupload = toupload.union(
442 set([ctx[f].data().strip()
442 set([ctx[f].data().strip()
443 for f in files
443 for f in files
444 if lfutil.isstandin(f) and f in ctx]))
444 if lfutil.isstandin(f) and f in ctx]))
445 lfcommands.uploadlfiles(ui, self, remote, toupload)
445 lfcommands.uploadlfiles(ui, self, remote, toupload)
446 return super(lfiles_repo, self).push(remote, force, revs,
446 return super(lfilesrepo, self).push(remote, force, revs,
447 newbranch)
447 newbranch)
448
448
449 repo.__class__ = lfiles_repo
449 repo.__class__ = lfilesrepo
450
450
451 def checkrequireslfiles(ui, repo, **kwargs):
451 def checkrequireslfiles(ui, repo, **kwargs):
452 if 'largefiles' not in repo.requirements and util.any(
452 if 'largefiles' not in repo.requirements and util.any(
453 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
453 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
454 repo.requirements.add('largefiles')
454 repo.requirements.add('largefiles')
455 repo._writerequirements()
455 repo._writerequirements()
456
456
457 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
457 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
458 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
458 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,143 +1,143 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles extension: uisetup'''
9 '''setup for largefiles extension: uisetup'''
10
10
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httprepo, localrepo, merge, sshrepo, sshserver, wireproto
12 httprepo, localrepo, merge, sshrepo, sshserver, wireproto
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.hgweb import hgweb_mod, protocol
14 from mercurial.hgweb import hgweb_mod, protocol
15
15
16 import overrides
16 import overrides
17 import proto
17 import proto
18
18
19 def uisetup(ui):
19 def uisetup(ui):
20 # Disable auto-status for some commands which assume that all
20 # Disable auto-status for some commands which assume that all
21 # files in the result are under Mercurial's control
21 # files in the result are under Mercurial's control
22
22
23 entry = extensions.wrapcommand(commands.table, 'add',
23 entry = extensions.wrapcommand(commands.table, 'add',
24 overrides.override_add)
24 overrides.overrideadd)
25 addopt = [('', 'large', None, _('add as largefile')),
25 addopt = [('', 'large', None, _('add as largefile')),
26 ('', 'normal', None, _('add as normal file')),
26 ('', 'normal', None, _('add as normal file')),
27 ('', 'lfsize', '', _('add all files above this size '
27 ('', 'lfsize', '', _('add all files above this size '
28 '(in megabytes) as largefiles '
28 '(in megabytes) as largefiles '
29 '(default: 10)'))]
29 '(default: 10)'))]
30 entry[1].extend(addopt)
30 entry[1].extend(addopt)
31
31
32 entry = extensions.wrapcommand(commands.table, 'addremove',
32 entry = extensions.wrapcommand(commands.table, 'addremove',
33 overrides.override_addremove)
33 overrides.overrideaddremove)
34 entry = extensions.wrapcommand(commands.table, 'remove',
34 entry = extensions.wrapcommand(commands.table, 'remove',
35 overrides.override_remove)
35 overrides.overrideremove)
36 entry = extensions.wrapcommand(commands.table, 'forget',
36 entry = extensions.wrapcommand(commands.table, 'forget',
37 overrides.override_forget)
37 overrides.overrideforget)
38 entry = extensions.wrapcommand(commands.table, 'status',
38 entry = extensions.wrapcommand(commands.table, 'status',
39 overrides.override_status)
39 overrides.overridestatus)
40 entry = extensions.wrapcommand(commands.table, 'log',
40 entry = extensions.wrapcommand(commands.table, 'log',
41 overrides.override_log)
41 overrides.overridelog)
42 entry = extensions.wrapcommand(commands.table, 'rollback',
42 entry = extensions.wrapcommand(commands.table, 'rollback',
43 overrides.override_rollback)
43 overrides.overriderollback)
44 entry = extensions.wrapcommand(commands.table, 'verify',
44 entry = extensions.wrapcommand(commands.table, 'verify',
45 overrides.override_verify)
45 overrides.overrideverify)
46
46
47 verifyopt = [('', 'large', None, _('verify largefiles')),
47 verifyopt = [('', 'large', None, _('verify largefiles')),
48 ('', 'lfa', None,
48 ('', 'lfa', None,
49 _('verify all revisions of largefiles not just current')),
49 _('verify all revisions of largefiles not just current')),
50 ('', 'lfc', None,
50 ('', 'lfc', None,
51 _('verify largefile contents not just existence'))]
51 _('verify largefile contents not just existence'))]
52 entry[1].extend(verifyopt)
52 entry[1].extend(verifyopt)
53
53
54 entry = extensions.wrapcommand(commands.table, 'outgoing',
54 entry = extensions.wrapcommand(commands.table, 'outgoing',
55 overrides.override_outgoing)
55 overrides.overrideoutgoing)
56 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
56 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
57 entry[1].extend(outgoingopt)
57 entry[1].extend(outgoingopt)
58 entry = extensions.wrapcommand(commands.table, 'summary',
58 entry = extensions.wrapcommand(commands.table, 'summary',
59 overrides.override_summary)
59 overrides.overridesummary)
60 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
60 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
61 entry[1].extend(summaryopt)
61 entry[1].extend(summaryopt)
62
62
63 entry = extensions.wrapcommand(commands.table, 'update',
63 entry = extensions.wrapcommand(commands.table, 'update',
64 overrides.override_update)
64 overrides.overrideupdate)
65 entry = extensions.wrapcommand(commands.table, 'pull',
65 entry = extensions.wrapcommand(commands.table, 'pull',
66 overrides.override_pull)
66 overrides.overridepull)
67 entry = extensions.wrapfunction(merge, '_checkunknownfile',
67 entry = extensions.wrapfunction(merge, '_checkunknownfile',
68 overrides.override_checkunknownfile)
68 overrides.overridecheckunknownfile)
69 entry = extensions.wrapfunction(merge, 'manifestmerge',
69 entry = extensions.wrapfunction(merge, 'manifestmerge',
70 overrides.override_manifestmerge)
70 overrides.overridemanifestmerge)
71 entry = extensions.wrapfunction(filemerge, 'filemerge',
71 entry = extensions.wrapfunction(filemerge, 'filemerge',
72 overrides.override_filemerge)
72 overrides.overridefilemerge)
73 entry = extensions.wrapfunction(cmdutil, 'copy',
73 entry = extensions.wrapfunction(cmdutil, 'copy',
74 overrides.override_copy)
74 overrides.overridecopy)
75
75
76 # Backout calls revert so we need to override both the command and the
76 # Backout calls revert so we need to override both the command and the
77 # function
77 # function
78 entry = extensions.wrapcommand(commands.table, 'revert',
78 entry = extensions.wrapcommand(commands.table, 'revert',
79 overrides.override_revert)
79 overrides.overriderevert)
80 entry = extensions.wrapfunction(commands, 'revert',
80 entry = extensions.wrapfunction(commands, 'revert',
81 overrides.override_revert)
81 overrides.overriderevert)
82
82
83 # clone uses hg._update instead of hg.update even though they are the
83 # clone uses hg._update instead of hg.update even though they are the
84 # same function... so wrap both of them)
84 # same function... so wrap both of them)
85 extensions.wrapfunction(hg, 'update', overrides.hg_update)
85 extensions.wrapfunction(hg, 'update', overrides.hgupdate)
86 extensions.wrapfunction(hg, '_update', overrides.hg_update)
86 extensions.wrapfunction(hg, '_update', overrides.hgupdate)
87 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
87 extensions.wrapfunction(hg, 'clean', overrides.hgclean)
88 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
88 extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
89
89
90 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
90 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
91 extensions.wrapfunction(cmdutil, 'bailifchanged',
91 extensions.wrapfunction(cmdutil, 'bailifchanged',
92 overrides.override_bailifchanged)
92 overrides.overridebailifchanged)
93
93
94 # create the new wireproto commands ...
94 # create the new wireproto commands ...
95 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
95 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
96 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
96 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
97 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
97 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
98
98
99 # ... and wrap some existing ones
99 # ... and wrap some existing ones
100 wireproto.commands['capabilities'] = (proto.capabilities, '')
100 wireproto.commands['capabilities'] = (proto.capabilities, '')
101 wireproto.commands['heads'] = (proto.heads, '')
101 wireproto.commands['heads'] = (proto.heads, '')
102 wireproto.commands['lheads'] = (wireproto.heads, '')
102 wireproto.commands['lheads'] = (wireproto.heads, '')
103
103
104 # make putlfile behave the same as push and {get,stat}lfile behave
104 # make putlfile behave the same as push and {get,stat}lfile behave
105 # the same as pull w.r.t. permissions checks
105 # the same as pull w.r.t. permissions checks
106 hgweb_mod.perms['putlfile'] = 'push'
106 hgweb_mod.perms['putlfile'] = 'push'
107 hgweb_mod.perms['getlfile'] = 'pull'
107 hgweb_mod.perms['getlfile'] = 'pull'
108 hgweb_mod.perms['statlfile'] = 'pull'
108 hgweb_mod.perms['statlfile'] = 'pull'
109
109
110 # the hello wireproto command uses wireproto.capabilities, so it won't see
110 # the hello wireproto command uses wireproto.capabilities, so it won't see
111 # our largefiles capability unless we replace the actual function as well.
111 # our largefiles capability unless we replace the actual function as well.
112 proto.capabilities_orig = wireproto.capabilities
112 proto.capabilitiesorig = wireproto.capabilities
113 wireproto.capabilities = proto.capabilities
113 wireproto.capabilities = proto.capabilities
114
114
115 # these let us reject non-largefiles clients and make them display
115 # these let us reject non-largefiles clients and make them display
116 # our error messages
116 # our error messages
117 protocol.webproto.refuseclient = proto.webproto_refuseclient
117 protocol.webproto.refuseclient = proto.webprotorefuseclient
118 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
118 sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
119
119
120 # can't do this in reposetup because it needs to have happened before
120 # can't do this in reposetup because it needs to have happened before
121 # wirerepo.__init__ is called
121 # wirerepo.__init__ is called
122 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
122 proto.ssholdcallstream = sshrepo.sshrepository._callstream
123 proto.http_oldcallstream = httprepo.httprepository._callstream
123 proto.httpoldcallstream = httprepo.httprepository._callstream
124 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
124 sshrepo.sshrepository._callstream = proto.sshrepocallstream
125 httprepo.httprepository._callstream = proto.httprepo_callstream
125 httprepo.httprepository._callstream = proto.httprepocallstream
126
126
127 # don't die on seeing a repo with the largefiles requirement
127 # don't die on seeing a repo with the largefiles requirement
128 localrepo.localrepository.supported |= set(['largefiles'])
128 localrepo.localrepository.supported |= set(['largefiles'])
129
129
130 # override some extensions' stuff as well
130 # override some extensions' stuff as well
131 for name, module in extensions.extensions():
131 for name, module in extensions.extensions():
132 if name == 'fetch':
132 if name == 'fetch':
133 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
133 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
134 overrides.override_fetch)
134 overrides.overridefetch)
135 if name == 'purge':
135 if name == 'purge':
136 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
136 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
137 overrides.override_purge)
137 overrides.overridepurge)
138 if name == 'rebase':
138 if name == 'rebase':
139 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
139 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
140 overrides.override_rebase)
140 overrides.overriderebase)
141 if name == 'transplant':
141 if name == 'transplant':
142 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
142 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
143 overrides.override_transplant)
143 overrides.overridetransplant)
General Comments 0
You need to be logged in to leave comments. Login now