##// END OF EJS Templates
largefiles: fix inappropriate locking (issue3182)...
Levi Bard -
r15794:0d91211d default
parent child Browse files
Show More
@@ -1,462 +1,450 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16 import tempfile
16 import tempfile
17
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 shortname = '.hglf'
21 shortname = '.hglf'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
26
26
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability wrapper: walk the dirstate with the given matcher,
    always passing an empty subrepo list.'''
    return dirstate.walk(matcher, [], unknown, ignored)
29
29
def repo_add(repo, list):
    '''Schedule the named files for addition via the working context.'''
    return repo[None].add(list)
33
33
34 def repo_remove(repo, list, unlink=False):
34 def repo_remove(repo, list, unlink=False):
35 def remove(list, unlink):
35 def remove(list, unlink):
36 wlock = repo.wlock()
36 wlock = repo.wlock()
37 try:
37 try:
38 if unlink:
38 if unlink:
39 for f in list:
39 for f in list:
40 try:
40 try:
41 util.unlinkpath(repo.wjoin(f))
41 util.unlinkpath(repo.wjoin(f))
42 except OSError, inst:
42 except OSError, inst:
43 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
44 raise
44 raise
45 repo[None].forget(list)
45 repo[None].forget(list)
46 finally:
46 finally:
47 wlock.release()
47 wlock.release()
48 return remove(list, unlink=unlink)
48 return remove(list, unlink=unlink)
49
49
def repo_forget(repo, list):
    '''Stop tracking the named files without removing them.'''
    return repo[None].forget(list)
53
53
def findoutgoing(repo, remote, force):
    '''Return the changesets present locally but missing from remote.'''
    # imported here to avoid a cycle at module load time
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(
        repo, remote, force=force)
    return repo.changelog.findmissing(common)
59
59
60 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
61
61
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which files are treated as
    largefiles: taken from opt when given, otherwise from the
    'largefiles.minsize' setting when assumelfiles is set.  Aborts when
    the value is not numeric or cannot be determined at all.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
75
75
def link(src, dest):
    '''Hardlink src to dest, falling back to an atomic copy (preserving
    the source's mode bits) when hardlinking is not possible.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (cross-device, unsupported fs, ...): copy instead
        out = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            out.write(chunk)
        out.close()
        os.chmod(dest, os.stat(src).st_mode)
86
86
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, or None when no usable cache location exists.

    The 'largefiles.usercache' setting wins when configured; otherwise
    the platform's conventional cache directory is used (APPDATA on
    Windows, ~/Library/Caches on Mac OS X, XDG_CACHE_HOME or ~/.cache
    on other POSIX systems).'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
        return None
    if platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname, hash)
        return None
    if os.name == 'posix':
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
        return None
    raise util.Abort(_('unknown operating system: %s\n') % os.name)
112
112
def inusercache(ui, hash):
    '''True when the largefile with the given hash is present in the
    per-user cache.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
116
116
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    linking it in from the user cache if necessary; return None when it
    is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
128
128
class largefiles_dirstate(dirstate.dirstate):
    # A dirstate whose entries are keyed by platform-neutral paths: every
    # public operation funnels its filename argument through unixpath()
    # (normpath + forward slashes) before delegating to the base class.
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefiles_dirstate, self).normallookup(unixpath(f))
144
144
145 def openlfdirstate(ui, repo):
145 def openlfdirstate(ui, repo):
146 '''
146 '''
147 Return a dirstate object that tracks largefiles: i.e. its root is
147 Return a dirstate object that tracks largefiles: i.e. its root is
148 the repo root, but it is saved in .hg/largefiles/dirstate.
148 the repo root, but it is saved in .hg/largefiles/dirstate.
149 '''
149 '''
150 admin = repo.join(longname)
150 admin = repo.join(longname)
151 opener = scmutil.opener(admin)
151 opener = scmutil.opener(admin)
152 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
152 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
153 repo.dirstate._validate)
153 repo.dirstate._validate)
154
154
155 # If the largefiles dirstate does not exist, populate and create
155 # If the largefiles dirstate does not exist, populate and create
156 # it. This ensures that we create it on the first meaningful
156 # it. This ensures that we create it on the first meaningful
157 # largefiles operation in a new clone. It also gives us an easy
157 # largefiles operation in a new clone.
158 # way to forcibly rebuild largefiles state:
159 # rm .hg/largefiles/dirstate && hg status
160 # Or even, if things are really messed up:
161 # rm -rf .hg/largefiles && hg status
162 if not os.path.exists(os.path.join(admin, 'dirstate')):
158 if not os.path.exists(os.path.join(admin, 'dirstate')):
163 util.makedirs(admin)
159 util.makedirs(admin)
164 matcher = getstandinmatcher(repo)
160 matcher = getstandinmatcher(repo)
165 for standin in dirstate_walk(repo.dirstate, matcher):
161 for standin in dirstate_walk(repo.dirstate, matcher):
166 lfile = splitstandin(standin)
162 lfile = splitstandin(standin)
167 hash = readstandin(repo, lfile)
163 hash = readstandin(repo, lfile)
168 lfdirstate.normallookup(lfile)
164 lfdirstate.normallookup(lfile)
169 try:
165 try:
170 if hash == hashfile(repo.wjoin(lfile)):
166 if hash == hashfile(repo.wjoin(lfile)):
171 lfdirstate.normal(lfile)
167 lfdirstate.normal(lfile)
172 except OSError, err:
168 except OSError, err:
173 if err.errno != errno.ENOENT:
169 if err.errno != errno.ENOENT:
174 raise
170 raise
175
176 lfdirstate.write()
177
178 return lfdirstate
171 return lfdirstate
179
172
def lfdirstate_status(lfdirstate, repo, rev):
    '''Compute the status of the largefiles tracked by lfdirstate
    relative to revision rev, rehashing any file the dirstate is unsure
    about and marking it clean in lfdirstate when it matches its
    standin.  Returns the usual status tuple minus the 'unsure' list.'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        standinhash = repo[rev][standin(lfile)].data().strip()
        if standinhash != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
197
185
def listlfiles(repo, rev=None, matcher=None):
    '''Return the list of largefiles in the working copy (rev is None)
    or in the specified changeset.'''
    if matcher is None:
        matcher = getstandinmatcher(repo)
    # unknown ('?') files in the working directory carry no largefile data
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
209
197
def instore(repo, hash):
    '''True when the largefile with the given hash exists in the repo's
    local store.'''
    return os.path.exists(storepath(repo, hash))
212
200
def storepath(repo, hash):
    '''Return the path inside the repo's local largefiles store for the
    file with the given hash.'''
    return repo.join(os.path.join(longname, hash))
215
203
def copyfromcache(repo, hash, filename):
    '''Copy the largefile with the given hash from the repo store or the
    user cache to filename in the working directory.  Returns False when
    the file is in neither cache (which callers are expected to have
    ruled out beforehand), True on success.'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # a plain (non-atomic) copy is fine: working-copy files are never
    # written atomically anyway
    shutil.copy(cached, repo.wjoin(filename))
    return True
230
218
def copytostore(repo, rev, file, uploaded=False):
    '''Ensure the largefile behind the given standin is present in the
    repo store, copying it from the working directory when absent.'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
236
224
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at the given absolute path into the repo store
    under its hash -- hardlinking from the user cache when possible --
    and make sure the user cache ends up holding it too.'''
    stored = storepath(repo, hash)
    util.makedirs(os.path.dirname(stored))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), stored)
    else:
        dst = util.atomictempfile(stored)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        util.copymode(file, stored)
    linktousercache(repo, hash)
248
236
def linktousercache(repo, hash):
    '''Hardlink the stored largefile into the per-user cache, when a
    user cache location is available on this system.'''
    target = usercachepath(repo.ui, hash)
    if target:
        util.makedirs(os.path.dirname(target))
        link(storepath(repo, hash), target)
254
242
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    pats and opts default to no patterns / no options.  The original
    used mutable default arguments (pats=[], opts={}) -- a classic
    shared-state hazard -- replaced here with None sentinels; callers
    are unaffected.'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>;
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: match everything under the standin directory
        pats = [standindir]
    else:
        # no patterns and no standin dir: return a matcher matching nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
275
263
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() adding a showbad flag: when false,
    the match object's bad() callback is neutered so it never prints
    warnings about missing files or directories.

    pats and opts default to no patterns / no options.  The original
    used mutable default arguments (pats=[], opts={}); replaced with
    None sentinels, which is behavior-identical for all callers.'''
    match = scmutil.match(repo[None], pats or [], opts or {})
    if not showbad:
        match.bad = lambda f, msg: None
    return match
285
273
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher accepting exactly the standins of the files that
    rmatcher accepts; rmatcher's file list becomes the user-specified
    paths of the new matcher.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    basematch = smatcher.matchfn
    smatcher.matchfn = (
        lambda f: basematch(f) and rmatcher.matchfn(splitstandin(f)))
    return smatcher
297
285
def standin(filename):
    '''Return the repo-relative path of the standin for the named big
    file.

    Kept repo-relative (not absolute) because _create_standin() must
    feed it to repo_add(); callers wanting an absolute path use
    repo.wjoin().  Joined with '/' since dirstate uses forward slashes
    on every platform; any native separators coming from external
    sources (e.g. the command line) are normalized first.'''
    return shortname + '/' + filename.replace(os.sep, '/')
309
297
def isstandin(filename):
    '''True when filename (in Mercurial's slash-separated internal form)
    lives under the big-file standin directory.'''
    return filename.startswith(shortname + '/')
314
302
def splitstandin(filename):
    '''Return the big-file name a standin path refers to, or None when
    the path is not a standin.  Splits on '/' (what dirstate always
    uses), normalizing any native separators from external sources
    first.'''
    parts = filename.replace(os.sep, '/').split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
324
312
def updatestandin(repo, standin):
    '''Rewrite the given standin from the current contents of its big
    file in the working directory; a missing big file leaves the
    standin untouched.'''
    abspath = repo.wjoin(splitstandin(standin))
    if os.path.exists(abspath):
        writestandin(repo, standin, hashfile(abspath),
                     getexecutable(abspath))
331
319
def readstandin(repo, filename, node=None):
    '''Return the hex hash recorded in the standin for filename at the
    given node, or in the working directory when node is None.'''
    return repo[node][standin(filename)].data().strip()
336
324
def writestandin(repo, standin, hash, executable):
    '''Write hash (plus newline) to <repo.root>/<standin>, setting the
    file mode according to executable.'''
    writehash(hash, repo.wjoin(standin), executable)
340
328
def copyandhash(instream, outfile):
    '''Copy the byte chunks from instream (an iterable) into outfile
    while computing their SHA-1.  Closes outfile when done and returns
    the binary digest.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    # Closing a file somebody else opened is impolite -- but every
    # caller opens outfile purely for this copy-and-hash pass, so it is
    # the practical thing to do.
    outfile.close()
    return hasher.digest()
356
344
def hashrepofile(repo, file):
    '''Return the content hash of the named file in the working
    directory.'''
    return hashfile(repo.wjoin(file))
359
347
def hashfile(file):
    '''Return the hex SHA-1 of the file's contents, or the empty string
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for block in blockstream(fd):
        hasher.update(block)
    # blockstream() already closed fd on exhaustion; closing again is a
    # harmless no-op, kept for symmetry with the open() above
    fd.close()
    return hasher.hexdigest()
369
357
class limitreader(object):
    '''Wrap a file-like object so that at most 'limit' bytes can ever be
    read through it.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit;
        once the limit is exhausted, return the empty string.'''
        if self.limit == 0:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        '''Deliberately a no-op: the underlying file stays open.'''
        pass
384
372
def blockstream(infile, blocksize=128 * 1024):
    """Yield successive blocks of up to blocksize bytes from infile,
    closing infile once it is exhausted (same impoliteness as
    copyandhash(): callers hand the file over entirely)."""
    while True:
        block = infile.read(blocksize)
        if not block:
            infile.close()
            return
        yield block
394
382
def readhash(filename):
    '''Return the 40-byte hex hash stored in the named standin file,
    aborting when the content is truncated.

    Fix: the original leaked the file handle if read() raised; the
    close now happens in a finally block.'''
    rfile = open(filename, 'rb')
    try:
        hash = rfile.read(40)
    finally:
        rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
403
391
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the mode bits per executable.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
408
396
def getexecutable(filename):
    '''Return a truthy value when user, group and other all have the
    execute bit set on filename, a falsy one otherwise.'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
414
402
415 def getmode(executable):
403 def getmode(executable):
416 if executable:
404 if executable:
417 return 0755
405 return 0755
418 else:
406 else:
419 return 0644
407 return 0644
420
408
def urljoin(first, second, *arg):
    '''Join two or more URL components, guaranteeing exactly one slash
    between adjacent components (adds one when missing, drops a single
    leading slash on the right-hand side).'''
    def _glue(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = _glue(first, second)
    for piece in arg:
        url = _glue(url, piece)
    return url
433
421
def hexsha1(data):
    """Return the hex-encoded SHA-1 digest of the contents of the
    file-like object data."""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
441
429
def httpsendfile(ui, filename):
    '''Return filename wrapped in an httpsendfile opened for binary
    reading, suitable for streaming over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
444
432
def unixpath(path):
    '''Normalize path for use as an lfdirstate key: collapse redundant
    separators/dots and force forward slashes.'''
    return os.path.normpath(path).replace(os.sep, '/')
448
436
def islfilesrepo(repo):
    '''True when repo declares the largefiles requirement and its store
    actually contains at least one standin.'''
    if 'largefiles' not in repo.requirements:
        return False
    return util.any(shortname + '/' in f[0] for f in repo.store.datafiles())
452
440
def mkstemp(repo, prefix):
    '''Return (file descriptor, path) for a new temporary file inside
    the repo's largefiles store, creating the store directory first if
    necessary.'''
    storedir = repo.join(longname)
    util.makedirs(storedir)
    return tempfile.mkstemp(prefix=prefix, dir=storedir)
459
447
class storeprotonotcapable(Exception):
    '''Raised when a remote store supports none of the requested store
    protocols; carries the list of store types that were attempted.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
@@ -1,927 +1,931 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge
15 node, archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def installnormalfilesmatchfn(manifest):
25 def installnormalfilesmatchfn(manifest):
26 '''overrides scmutil.match so that the matcher it returns will ignore all
26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 largefiles'''
27 largefiles'''
28 oldmatch = None # for the closure
28 oldmatch = None # for the closure
29 def override_match(ctx, pats=[], opts={}, globbed=False,
29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 default='relpath'):
30 default='relpath'):
31 match = oldmatch(ctx, pats, opts, globbed, default)
31 match = oldmatch(ctx, pats, opts, globbed, default)
32 m = copy.copy(match)
32 m = copy.copy(match)
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 manifest)
34 manifest)
35 m._files = filter(notlfile, m._files)
35 m._files = filter(notlfile, m._files)
36 m._fmap = set(m._files)
36 m._fmap = set(m._files)
37 orig_matchfn = m.matchfn
37 orig_matchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(override_match)
40 oldmatch = installmatchfn(override_match)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 oldmatch = scmutil.match
43 oldmatch = scmutil.match
44 setattr(f, 'oldmatch', oldmatch)
44 setattr(f, 'oldmatch', oldmatch)
45 scmutil.match = f
45 scmutil.match = f
46 return oldmatch
46 return oldmatch
47
47
48 def restorematchfn():
48 def restorematchfn():
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 was called. no-op if scmutil.match is its original function.
50 was called. no-op if scmutil.match is its original function.
51
51
52 Note that n calls to installnormalfilesmatchfn will require n calls to
52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 restore matchfn to reverse'''
53 restore matchfn to reverse'''
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55
55
56 def add_largefiles(ui, repo, *pats, **opts):
56 def add_largefiles(ui, repo, *pats, **opts):
57 large = opts.pop('large', None)
57 large = opts.pop('large', None)
58 lfsize = lfutil.getminsize(
58 lfsize = lfutil.getminsize(
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60
60
61 lfmatcher = None
61 lfmatcher = None
62 if lfutil.islfilesrepo(repo):
62 if lfutil.islfilesrepo(repo):
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 if lfpats:
64 if lfpats:
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66
66
67 lfnames = []
67 lfnames = []
68 m = scmutil.match(repo[None], pats, opts)
68 m = scmutil.match(repo[None], pats, opts)
69 m.bad = lambda x, y: None
69 m.bad = lambda x, y: None
70 wctx = repo[None]
70 wctx = repo[None]
71 for f in repo.walk(m):
71 for f in repo.walk(m):
72 exact = m.exact(f)
72 exact = m.exact(f)
73 lfile = lfutil.standin(f) in wctx
73 lfile = lfutil.standin(f) in wctx
74 nfile = f in wctx
74 nfile = f in wctx
75 exists = lfile or nfile
75 exists = lfile or nfile
76
76
77 # Don't warn the user when they attempt to add a normal tracked file.
77 # Don't warn the user when they attempt to add a normal tracked file.
78 # The normal add code will do that for us.
78 # The normal add code will do that for us.
79 if exact and exists:
79 if exact and exists:
80 if lfile:
80 if lfile:
81 ui.warn(_('%s already a largefile\n') % f)
81 ui.warn(_('%s already a largefile\n') % f)
82 continue
82 continue
83
83
84 if exact or not exists:
84 if exact or not exists:
85 abovemin = (lfsize and
85 abovemin = (lfsize and
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 lfnames.append(f)
88 lfnames.append(f)
89 if ui.verbose or not exact:
89 if ui.verbose or not exact:
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91
91
92 bad = []
92 bad = []
93 standins = []
93 standins = []
94
94
95 # Need to lock, otherwise there could be a race condition between
95 # Need to lock, otherwise there could be a race condition between
96 # when standins are created and added to the repo.
96 # when standins are created and added to the repo.
97 wlock = repo.wlock()
97 wlock = repo.wlock()
98 try:
98 try:
99 if not opts.get('dry_run'):
99 if not opts.get('dry_run'):
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 for f in lfnames:
101 for f in lfnames:
102 standinname = lfutil.standin(f)
102 standinname = lfutil.standin(f)
103 lfutil.writestandin(repo, standinname, hash='',
103 lfutil.writestandin(repo, standinname, hash='',
104 executable=lfutil.getexecutable(repo.wjoin(f)))
104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 standins.append(standinname)
105 standins.append(standinname)
106 if lfdirstate[f] == 'r':
106 if lfdirstate[f] == 'r':
107 lfdirstate.normallookup(f)
107 lfdirstate.normallookup(f)
108 else:
108 else:
109 lfdirstate.add(f)
109 lfdirstate.add(f)
110 lfdirstate.write()
110 lfdirstate.write()
111 bad += [lfutil.splitstandin(f)
111 bad += [lfutil.splitstandin(f)
112 for f in lfutil.repo_add(repo, standins)
112 for f in lfutil.repo_add(repo, standins)
113 if f in m.files()]
113 if f in m.files()]
114 finally:
114 finally:
115 wlock.release()
115 wlock.release()
116 return bad
116 return bad
117
117
118 def remove_largefiles(ui, repo, *pats, **opts):
118 def remove_largefiles(ui, repo, *pats, **opts):
119 after = opts.get('after')
119 after = opts.get('after')
120 if not pats and not after:
120 if not pats and not after:
121 raise util.Abort(_('no files specified'))
121 raise util.Abort(_('no files specified'))
122 m = scmutil.match(repo[None], pats, opts)
122 m = scmutil.match(repo[None], pats, opts)
123 try:
123 try:
124 repo.lfstatus = True
124 repo.lfstatus = True
125 s = repo.status(match=m, clean=True)
125 s = repo.status(match=m, clean=True)
126 finally:
126 finally:
127 repo.lfstatus = False
127 repo.lfstatus = False
128 manifest = repo[None].manifest()
128 manifest = repo[None].manifest()
129 modified, added, deleted, clean = [[f for f in list
129 modified, added, deleted, clean = [[f for f in list
130 if lfutil.standin(f) in manifest]
130 if lfutil.standin(f) in manifest]
131 for list in [s[0], s[1], s[3], s[6]]]
131 for list in [s[0], s[1], s[3], s[6]]]
132
132
133 def warn(files, reason):
133 def warn(files, reason):
134 for f in files:
134 for f in files:
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 % (m.rel(f), reason))
136 % (m.rel(f), reason))
137
137
138 if after:
138 if after:
139 remove, forget = deleted, []
139 remove, forget = deleted, []
140 warn(modified + added + clean, _('file still exists'))
140 warn(modified + added + clean, _('file still exists'))
141 else:
141 else:
142 remove, forget = deleted + clean, []
142 remove, forget = deleted + clean, []
143 warn(modified, _('file is modified'))
143 warn(modified, _('file is modified'))
144 warn(added, _('file has been marked for add'))
144 warn(added, _('file has been marked for add'))
145
145
146 for f in sorted(remove + forget):
146 for f in sorted(remove + forget):
147 if ui.verbose or not m.exact(f):
147 if ui.verbose or not m.exact(f):
148 ui.status(_('removing %s\n') % m.rel(f))
148 ui.status(_('removing %s\n') % m.rel(f))
149
149
150 # Need to lock because standin files are deleted then removed from the
150 # Need to lock because standin files are deleted then removed from the
151 # repository and we could race inbetween.
151 # repository and we could race inbetween.
152 wlock = repo.wlock()
152 wlock = repo.wlock()
153 try:
153 try:
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 for f in remove:
155 for f in remove:
156 if not after:
156 if not after:
157 # If this is being called by addremove, notify the user that we
157 # If this is being called by addremove, notify the user that we
158 # are removing the file.
158 # are removing the file.
159 if getattr(repo, "_isaddremove", False):
159 if getattr(repo, "_isaddremove", False):
160 ui.status(_('removing %s\n' % f))
160 ui.status(_('removing %s\n' % f))
161 if os.path.exists(repo.wjoin(f)):
161 if os.path.exists(repo.wjoin(f)):
162 os.unlink(repo.wjoin(f))
162 os.unlink(repo.wjoin(f))
163 currentdir = os.path.split(f)[0]
163 currentdir = os.path.split(f)[0]
164 while currentdir and not os.listdir(repo.wjoin(currentdir)):
164 while currentdir and not os.listdir(repo.wjoin(currentdir)):
165 os.rmdir(repo.wjoin(currentdir))
165 os.rmdir(repo.wjoin(currentdir))
166 currentdir = os.path.split(currentdir)[0]
166 currentdir = os.path.split(currentdir)[0]
167 lfdirstate.remove(f)
167 lfdirstate.remove(f)
168 lfdirstate.write()
168 lfdirstate.write()
169 forget = [lfutil.standin(f) for f in forget]
169 forget = [lfutil.standin(f) for f in forget]
170 remove = [lfutil.standin(f) for f in remove]
170 remove = [lfutil.standin(f) for f in remove]
171 lfutil.repo_forget(repo, forget)
171 lfutil.repo_forget(repo, forget)
172 # If this is being called by addremove, let the original addremove
172 # If this is being called by addremove, let the original addremove
173 # function handle this.
173 # function handle this.
174 if not getattr(repo, "_isaddremove", False):
174 if not getattr(repo, "_isaddremove", False):
175 lfutil.repo_remove(repo, remove, unlink=True)
175 lfutil.repo_remove(repo, remove, unlink=True)
176 finally:
176 finally:
177 wlock.release()
177 wlock.release()
178
178
179 # -- Wrappers: modify existing commands --------------------------------
179 # -- Wrappers: modify existing commands --------------------------------
180
180
181 # Add works by going through the files that the user wanted to add and
181 # Add works by going through the files that the user wanted to add and
182 # checking if they should be added as largefiles. Then it makes a new
182 # checking if they should be added as largefiles. Then it makes a new
183 # matcher which matches only the normal files and runs the original
183 # matcher which matches only the normal files and runs the original
184 # version of add.
184 # version of add.
185 def override_add(orig, ui, repo, *pats, **opts):
185 def override_add(orig, ui, repo, *pats, **opts):
186 bad = add_largefiles(ui, repo, *pats, **opts)
186 bad = add_largefiles(ui, repo, *pats, **opts)
187 installnormalfilesmatchfn(repo[None].manifest())
187 installnormalfilesmatchfn(repo[None].manifest())
188 result = orig(ui, repo, *pats, **opts)
188 result = orig(ui, repo, *pats, **opts)
189 restorematchfn()
189 restorematchfn()
190
190
191 return (result == 1 or bad) and 1 or 0
191 return (result == 1 or bad) and 1 or 0
192
192
193 def override_remove(orig, ui, repo, *pats, **opts):
193 def override_remove(orig, ui, repo, *pats, **opts):
194 installnormalfilesmatchfn(repo[None].manifest())
194 installnormalfilesmatchfn(repo[None].manifest())
195 orig(ui, repo, *pats, **opts)
195 orig(ui, repo, *pats, **opts)
196 restorematchfn()
196 restorematchfn()
197 remove_largefiles(ui, repo, *pats, **opts)
197 remove_largefiles(ui, repo, *pats, **opts)
198
198
199 def override_status(orig, ui, repo, *pats, **opts):
199 def override_status(orig, ui, repo, *pats, **opts):
200 try:
200 try:
201 repo.lfstatus = True
201 repo.lfstatus = True
202 return orig(ui, repo, *pats, **opts)
202 return orig(ui, repo, *pats, **opts)
203 finally:
203 finally:
204 repo.lfstatus = False
204 repo.lfstatus = False
205
205
206 def override_log(orig, ui, repo, *pats, **opts):
206 def override_log(orig, ui, repo, *pats, **opts):
207 try:
207 try:
208 repo.lfstatus = True
208 repo.lfstatus = True
209 orig(ui, repo, *pats, **opts)
209 orig(ui, repo, *pats, **opts)
210 finally:
210 finally:
211 repo.lfstatus = False
211 repo.lfstatus = False
212
212
213 def override_verify(orig, ui, repo, *pats, **opts):
213 def override_verify(orig, ui, repo, *pats, **opts):
214 large = opts.pop('large', False)
214 large = opts.pop('large', False)
215 all = opts.pop('lfa', False)
215 all = opts.pop('lfa', False)
216 contents = opts.pop('lfc', False)
216 contents = opts.pop('lfc', False)
217
217
218 result = orig(ui, repo, *pats, **opts)
218 result = orig(ui, repo, *pats, **opts)
219 if large:
219 if large:
220 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
220 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
221 return result
221 return result
222
222
223 # Override needs to refresh standins so that update's normal merge
223 # Override needs to refresh standins so that update's normal merge
224 # will go through properly. Then the other update hook (overriding repo.update)
224 # will go through properly. Then the other update hook (overriding repo.update)
225 # will get the new files. Filemerge is also overriden so that the merge
225 # will get the new files. Filemerge is also overriden so that the merge
226 # will merge standins correctly.
226 # will merge standins correctly.
227 def override_update(orig, ui, repo, *pats, **opts):
227 def override_update(orig, ui, repo, *pats, **opts):
228 lfdirstate = lfutil.openlfdirstate(ui, repo)
228 lfdirstate = lfutil.openlfdirstate(ui, repo)
229 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
229 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
230 False, False)
230 False, False)
231 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
231 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
232
232
233 # Need to lock between the standins getting updated and their
233 # Need to lock between the standins getting updated and their
234 # largefiles getting updated
234 # largefiles getting updated
235 wlock = repo.wlock()
235 wlock = repo.wlock()
236 try:
236 try:
237 if opts['check']:
237 if opts['check']:
238 mod = len(modified) > 0
238 mod = len(modified) > 0
239 for lfile in unsure:
239 for lfile in unsure:
240 standin = lfutil.standin(lfile)
240 standin = lfutil.standin(lfile)
241 if repo['.'][standin].data().strip() != \
241 if repo['.'][standin].data().strip() != \
242 lfutil.hashfile(repo.wjoin(lfile)):
242 lfutil.hashfile(repo.wjoin(lfile)):
243 mod = True
243 mod = True
244 else:
244 else:
245 lfdirstate.normal(lfile)
245 lfdirstate.normal(lfile)
246 lfdirstate.write()
246 lfdirstate.write()
247 if mod:
247 if mod:
248 raise util.Abort(_('uncommitted local changes'))
248 raise util.Abort(_('uncommitted local changes'))
249 # XXX handle removed differently
249 # XXX handle removed differently
250 if not opts['clean']:
250 if not opts['clean']:
251 for lfile in unsure + modified + added:
251 for lfile in unsure + modified + added:
252 lfutil.updatestandin(repo, lfutil.standin(lfile))
252 lfutil.updatestandin(repo, lfutil.standin(lfile))
253 finally:
253 finally:
254 wlock.release()
254 wlock.release()
255 return orig(ui, repo, *pats, **opts)
255 return orig(ui, repo, *pats, **opts)
256
256
257 # Before starting the manifest merge, merge.updates will call
257 # Before starting the manifest merge, merge.updates will call
258 # _checkunknown to check if there are any files in the merged-in
258 # _checkunknown to check if there are any files in the merged-in
259 # changeset that collide with unknown files in the working copy.
259 # changeset that collide with unknown files in the working copy.
260 #
260 #
261 # The largefiles are seen as unknown, so this prevents us from merging
261 # The largefiles are seen as unknown, so this prevents us from merging
262 # in a file 'foo' if we already have a largefile with the same name.
262 # in a file 'foo' if we already have a largefile with the same name.
263 #
263 #
264 # The overridden function filters the unknown files by removing any
264 # The overridden function filters the unknown files by removing any
265 # largefiles. This makes the merge proceed and we can then handle this
265 # largefiles. This makes the merge proceed and we can then handle this
266 # case further in the overridden manifestmerge function below.
266 # case further in the overridden manifestmerge function below.
267 def override_checkunknown(origfn, wctx, mctx, folding):
267 def override_checkunknown(origfn, wctx, mctx, folding):
268 origunknown = wctx.unknown()
268 origunknown = wctx.unknown()
269 wctx._unknown = filter(lambda f: lfutil.standin(f) not in wctx, origunknown)
269 wctx._unknown = filter(lambda f: lfutil.standin(f) not in wctx, origunknown)
270 try:
270 try:
271 return origfn(wctx, mctx, folding)
271 return origfn(wctx, mctx, folding)
272 finally:
272 finally:
273 wctx._unknown = origunknown
273 wctx._unknown = origunknown
274
274
275 # The manifest merge handles conflicts on the manifest level. We want
275 # The manifest merge handles conflicts on the manifest level. We want
276 # to handle changes in largefile-ness of files at this level too.
276 # to handle changes in largefile-ness of files at this level too.
277 #
277 #
278 # The strategy is to run the original manifestmerge and then process
278 # The strategy is to run the original manifestmerge and then process
279 # the action list it outputs. There are two cases we need to deal with:
279 # the action list it outputs. There are two cases we need to deal with:
280 #
280 #
281 # 1. Normal file in p1, largefile in p2. Here the largefile is
281 # 1. Normal file in p1, largefile in p2. Here the largefile is
282 # detected via its standin file, which will enter the working copy
282 # detected via its standin file, which will enter the working copy
283 # with a "get" action. It is not "merge" since the standin is all
283 # with a "get" action. It is not "merge" since the standin is all
284 # Mercurial is concerned with at this level -- the link to the
284 # Mercurial is concerned with at this level -- the link to the
285 # existing normal file is not relevant here.
285 # existing normal file is not relevant here.
286 #
286 #
287 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
287 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
288 # since the largefile will be present in the working copy and
288 # since the largefile will be present in the working copy and
289 # different from the normal file in p2. Mercurial therefore
289 # different from the normal file in p2. Mercurial therefore
290 # triggers a merge action.
290 # triggers a merge action.
291 #
291 #
292 # In both cases, we prompt the user and emit new actions to either
292 # In both cases, we prompt the user and emit new actions to either
293 # remove the standin (if the normal file was kept) or to remove the
293 # remove the standin (if the normal file was kept) or to remove the
294 # normal file and get the standin (if the largefile was kept). The
294 # normal file and get the standin (if the largefile was kept). The
295 # default prompt answer is to use the largefile version since it was
295 # default prompt answer is to use the largefile version since it was
296 # presumably changed on purpose.
296 # presumably changed on purpose.
297 #
297 #
298 # Finally, the merge.applyupdates function will then take care of
298 # Finally, the merge.applyupdates function will then take care of
299 # writing the files into the working copy and lfcommands.updatelfiles
299 # writing the files into the working copy and lfcommands.updatelfiles
300 # will update the largefiles.
300 # will update the largefiles.
301 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
301 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
302 actions = origfn(repo, p1, p2, pa, overwrite, partial)
302 actions = origfn(repo, p1, p2, pa, overwrite, partial)
303 processed = []
303 processed = []
304
304
305 for action in actions:
305 for action in actions:
306 if overwrite:
306 if overwrite:
307 processed.append(action)
307 processed.append(action)
308 continue
308 continue
309 f, m = action[:2]
309 f, m = action[:2]
310
310
311 choices = (_('&Largefile'), _('&Normal file'))
311 choices = (_('&Largefile'), _('&Normal file'))
312 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
312 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
313 # Case 1: normal file in the working copy, largefile in
313 # Case 1: normal file in the working copy, largefile in
314 # the second parent
314 # the second parent
315 lfile = lfutil.splitstandin(f)
315 lfile = lfutil.splitstandin(f)
316 standin = f
316 standin = f
317 msg = _('%s has been turned into a largefile\n'
317 msg = _('%s has been turned into a largefile\n'
318 'use (l)argefile or keep as (n)ormal file?') % lfile
318 'use (l)argefile or keep as (n)ormal file?') % lfile
319 if repo.ui.promptchoice(msg, choices, 0) == 0:
319 if repo.ui.promptchoice(msg, choices, 0) == 0:
320 processed.append((lfile, "r"))
320 processed.append((lfile, "r"))
321 processed.append((standin, "g", p2.flags(standin)))
321 processed.append((standin, "g", p2.flags(standin)))
322 else:
322 else:
323 processed.append((standin, "r"))
323 processed.append((standin, "r"))
324 elif m == "m" and lfutil.standin(f) in p1 and f in p2:
324 elif m == "m" and lfutil.standin(f) in p1 and f in p2:
325 # Case 2: largefile in the working copy, normal file in
325 # Case 2: largefile in the working copy, normal file in
326 # the second parent
326 # the second parent
327 standin = lfutil.standin(f)
327 standin = lfutil.standin(f)
328 lfile = f
328 lfile = f
329 msg = _('%s has been turned into a normal file\n'
329 msg = _('%s has been turned into a normal file\n'
330 'keep as (l)argefile or use (n)ormal file?') % lfile
330 'keep as (l)argefile or use (n)ormal file?') % lfile
331 if repo.ui.promptchoice(msg, choices, 0) == 0:
331 if repo.ui.promptchoice(msg, choices, 0) == 0:
332 processed.append((lfile, "r"))
332 processed.append((lfile, "r"))
333 else:
333 else:
334 processed.append((standin, "r"))
334 processed.append((standin, "r"))
335 processed.append((lfile, "g", p2.flags(lfile)))
335 processed.append((lfile, "g", p2.flags(lfile)))
336 else:
336 else:
337 processed.append(action)
337 processed.append(action)
338
338
339 return processed
339 return processed
340
340
341 # Override filemerge to prompt the user about how they wish to merge
341 # Override filemerge to prompt the user about how they wish to merge
342 # largefiles. This will handle identical edits, and copy/rename +
342 # largefiles. This will handle identical edits, and copy/rename +
343 # edit without prompting the user.
343 # edit without prompting the user.
344 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
344 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
345 # Use better variable names here. Because this is a wrapper we cannot
345 # Use better variable names here. Because this is a wrapper we cannot
346 # change the variable names in the function declaration.
346 # change the variable names in the function declaration.
347 fcdest, fcother, fcancestor = fcd, fco, fca
347 fcdest, fcother, fcancestor = fcd, fco, fca
348 if not lfutil.isstandin(orig):
348 if not lfutil.isstandin(orig):
349 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
349 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
350 else:
350 else:
351 if not fcother.cmp(fcdest): # files identical?
351 if not fcother.cmp(fcdest): # files identical?
352 return None
352 return None
353
353
354 # backwards, use working dir parent as ancestor
354 # backwards, use working dir parent as ancestor
355 if fcancestor == fcother:
355 if fcancestor == fcother:
356 fcancestor = fcdest.parents()[0]
356 fcancestor = fcdest.parents()[0]
357
357
358 if orig != fcother.path():
358 if orig != fcother.path():
359 repo.ui.status(_('merging %s and %s to %s\n')
359 repo.ui.status(_('merging %s and %s to %s\n')
360 % (lfutil.splitstandin(orig),
360 % (lfutil.splitstandin(orig),
361 lfutil.splitstandin(fcother.path()),
361 lfutil.splitstandin(fcother.path()),
362 lfutil.splitstandin(fcdest.path())))
362 lfutil.splitstandin(fcdest.path())))
363 else:
363 else:
364 repo.ui.status(_('merging %s\n')
364 repo.ui.status(_('merging %s\n')
365 % lfutil.splitstandin(fcdest.path()))
365 % lfutil.splitstandin(fcdest.path()))
366
366
367 if fcancestor.path() != fcother.path() and fcother.data() == \
367 if fcancestor.path() != fcother.path() and fcother.data() == \
368 fcancestor.data():
368 fcancestor.data():
369 return 0
369 return 0
370 if fcancestor.path() != fcdest.path() and fcdest.data() == \
370 if fcancestor.path() != fcdest.path() and fcdest.data() == \
371 fcancestor.data():
371 fcancestor.data():
372 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
372 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
373 return 0
373 return 0
374
374
375 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
375 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
376 'keep (l)ocal or take (o)ther?') %
376 'keep (l)ocal or take (o)ther?') %
377 lfutil.splitstandin(orig),
377 lfutil.splitstandin(orig),
378 (_('&Local'), _('&Other')), 0) == 0:
378 (_('&Local'), _('&Other')), 0) == 0:
379 return 0
379 return 0
380 else:
380 else:
381 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
381 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
382 return 0
382 return 0
383
383
384 # Copy first changes the matchers to match standins instead of
384 # Copy first changes the matchers to match standins instead of
385 # largefiles. Then it overrides util.copyfile in that function it
385 # largefiles. Then it overrides util.copyfile in that function it
386 # checks if the destination largefile already exists. It also keeps a
386 # checks if the destination largefile already exists. It also keeps a
387 # list of copied files so that the largefiles can be copied and the
387 # list of copied files so that the largefiles can be copied and the
388 # dirstate updated.
388 # dirstate updated.
389 def override_copy(orig, ui, repo, pats, opts, rename=False):
389 def override_copy(orig, ui, repo, pats, opts, rename=False):
390 # doesn't remove largefile on rename
390 # doesn't remove largefile on rename
391 if len(pats) < 2:
391 if len(pats) < 2:
392 # this isn't legal, let the original function deal with it
392 # this isn't legal, let the original function deal with it
393 return orig(ui, repo, pats, opts, rename)
393 return orig(ui, repo, pats, opts, rename)
394
394
395 def makestandin(relpath):
395 def makestandin(relpath):
396 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
396 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
397 return os.path.join(repo.wjoin(lfutil.standin(path)))
397 return os.path.join(repo.wjoin(lfutil.standin(path)))
398
398
399 fullpats = scmutil.expandpats(pats)
399 fullpats = scmutil.expandpats(pats)
400 dest = fullpats[-1]
400 dest = fullpats[-1]
401
401
402 if os.path.isdir(dest):
402 if os.path.isdir(dest):
403 if not os.path.isdir(makestandin(dest)):
403 if not os.path.isdir(makestandin(dest)):
404 os.makedirs(makestandin(dest))
404 os.makedirs(makestandin(dest))
405 # This could copy both lfiles and normal files in one command,
405 # This could copy both lfiles and normal files in one command,
406 # but we don't want to do that. First replace their matcher to
406 # but we don't want to do that. First replace their matcher to
407 # only match normal files and run it, then replace it to just
407 # only match normal files and run it, then replace it to just
408 # match largefiles and run it again.
408 # match largefiles and run it again.
409 nonormalfiles = False
409 nonormalfiles = False
410 nolfiles = False
410 nolfiles = False
411 try:
411 try:
412 try:
412 try:
413 installnormalfilesmatchfn(repo[None].manifest())
413 installnormalfilesmatchfn(repo[None].manifest())
414 result = orig(ui, repo, pats, opts, rename)
414 result = orig(ui, repo, pats, opts, rename)
415 except util.Abort, e:
415 except util.Abort, e:
416 if str(e) != 'no files to copy':
416 if str(e) != 'no files to copy':
417 raise e
417 raise e
418 else:
418 else:
419 nonormalfiles = True
419 nonormalfiles = True
420 result = 0
420 result = 0
421 finally:
421 finally:
422 restorematchfn()
422 restorematchfn()
423
423
424 # The first rename can cause our current working directory to be removed.
424 # The first rename can cause our current working directory to be removed.
425 # In that case there is nothing left to copy/rename so just quit.
425 # In that case there is nothing left to copy/rename so just quit.
426 try:
426 try:
427 repo.getcwd()
427 repo.getcwd()
428 except OSError:
428 except OSError:
429 return result
429 return result
430
430
431 try:
431 try:
432 try:
432 try:
433 # When we call orig below it creates the standins but we don't add them
433 # When we call orig below it creates the standins but we don't add them
434 # to the dir state until later so lock during that time.
434 # to the dir state until later so lock during that time.
435 wlock = repo.wlock()
435 wlock = repo.wlock()
436
436
437 manifest = repo[None].manifest()
437 manifest = repo[None].manifest()
438 oldmatch = None # for the closure
438 oldmatch = None # for the closure
439 def override_match(ctx, pats=[], opts={}, globbed=False,
439 def override_match(ctx, pats=[], opts={}, globbed=False,
440 default='relpath'):
440 default='relpath'):
441 newpats = []
441 newpats = []
442 # The patterns were previously mangled to add the standin
442 # The patterns were previously mangled to add the standin
443 # directory; we need to remove that now
443 # directory; we need to remove that now
444 for pat in pats:
444 for pat in pats:
445 if match_.patkind(pat) is None and lfutil.shortname in pat:
445 if match_.patkind(pat) is None and lfutil.shortname in pat:
446 newpats.append(pat.replace(lfutil.shortname, ''))
446 newpats.append(pat.replace(lfutil.shortname, ''))
447 else:
447 else:
448 newpats.append(pat)
448 newpats.append(pat)
449 match = oldmatch(ctx, newpats, opts, globbed, default)
449 match = oldmatch(ctx, newpats, opts, globbed, default)
450 m = copy.copy(match)
450 m = copy.copy(match)
451 lfile = lambda f: lfutil.standin(f) in manifest
451 lfile = lambda f: lfutil.standin(f) in manifest
452 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
452 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
453 m._fmap = set(m._files)
453 m._fmap = set(m._files)
454 orig_matchfn = m.matchfn
454 orig_matchfn = m.matchfn
455 m.matchfn = lambda f: (lfutil.isstandin(f) and
455 m.matchfn = lambda f: (lfutil.isstandin(f) and
456 lfile(lfutil.splitstandin(f)) and
456 lfile(lfutil.splitstandin(f)) and
457 orig_matchfn(lfutil.splitstandin(f)) or
457 orig_matchfn(lfutil.splitstandin(f)) or
458 None)
458 None)
459 return m
459 return m
460 oldmatch = installmatchfn(override_match)
460 oldmatch = installmatchfn(override_match)
461 listpats = []
461 listpats = []
462 for pat in pats:
462 for pat in pats:
463 if match_.patkind(pat) is not None:
463 if match_.patkind(pat) is not None:
464 listpats.append(pat)
464 listpats.append(pat)
465 else:
465 else:
466 listpats.append(makestandin(pat))
466 listpats.append(makestandin(pat))
467
467
468 try:
468 try:
469 origcopyfile = util.copyfile
469 origcopyfile = util.copyfile
470 copiedfiles = []
470 copiedfiles = []
471 def override_copyfile(src, dest):
471 def override_copyfile(src, dest):
472 if (lfutil.shortname in src and
472 if (lfutil.shortname in src and
473 dest.startswith(repo.wjoin(lfutil.shortname))):
473 dest.startswith(repo.wjoin(lfutil.shortname))):
474 destlfile = dest.replace(lfutil.shortname, '')
474 destlfile = dest.replace(lfutil.shortname, '')
475 if not opts['force'] and os.path.exists(destlfile):
475 if not opts['force'] and os.path.exists(destlfile):
476 raise IOError('',
476 raise IOError('',
477 _('destination largefile already exists'))
477 _('destination largefile already exists'))
478 copiedfiles.append((src, dest))
478 copiedfiles.append((src, dest))
479 origcopyfile(src, dest)
479 origcopyfile(src, dest)
480
480
481 util.copyfile = override_copyfile
481 util.copyfile = override_copyfile
482 result += orig(ui, repo, listpats, opts, rename)
482 result += orig(ui, repo, listpats, opts, rename)
483 finally:
483 finally:
484 util.copyfile = origcopyfile
484 util.copyfile = origcopyfile
485
485
486 lfdirstate = lfutil.openlfdirstate(ui, repo)
486 lfdirstate = lfutil.openlfdirstate(ui, repo)
487 for (src, dest) in copiedfiles:
487 for (src, dest) in copiedfiles:
488 if (lfutil.shortname in src and
488 if (lfutil.shortname in src and
489 dest.startswith(repo.wjoin(lfutil.shortname))):
489 dest.startswith(repo.wjoin(lfutil.shortname))):
490 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
490 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
491 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
491 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
492 destlfiledir = os.path.dirname(destlfile) or '.'
492 destlfiledir = os.path.dirname(destlfile) or '.'
493 if not os.path.isdir(destlfiledir):
493 if not os.path.isdir(destlfiledir):
494 os.makedirs(destlfiledir)
494 os.makedirs(destlfiledir)
495 if rename:
495 if rename:
496 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
496 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
497 lfdirstate.remove(srclfile)
497 lfdirstate.remove(srclfile)
498 else:
498 else:
499 util.copyfile(srclfile, destlfile)
499 util.copyfile(srclfile, destlfile)
500 lfdirstate.add(destlfile)
500 lfdirstate.add(destlfile)
501 lfdirstate.write()
501 lfdirstate.write()
502 except util.Abort, e:
502 except util.Abort, e:
503 if str(e) != 'no files to copy':
503 if str(e) != 'no files to copy':
504 raise e
504 raise e
505 else:
505 else:
506 nolfiles = True
506 nolfiles = True
507 finally:
507 finally:
508 restorematchfn()
508 restorematchfn()
509 wlock.release()
509 wlock.release()
510
510
511 if nolfiles and nonormalfiles:
511 if nolfiles and nonormalfiles:
512 raise util.Abort(_('no files to copy'))
512 raise util.Abort(_('no files to copy'))
513
513
514 return result
514 return result
515
515
516 # When the user calls revert, we have to be careful to not revert any
516 # When the user calls revert, we have to be careful to not revert any
517 # changes to other largefiles accidentally. This means we have to keep
517 # changes to other largefiles accidentally. This means we have to keep
518 # track of the largefiles that are being reverted so we only pull down
518 # track of the largefiles that are being reverted so we only pull down
519 # the necessary largefiles.
519 # the necessary largefiles.
520 #
520 #
521 # Standins are only updated (to match the hash of largefiles) before
521 # Standins are only updated (to match the hash of largefiles) before
522 # commits. Update the standins then run the original revert, changing
522 # commits. Update the standins then run the original revert, changing
523 # the matcher to hit standins instead of largefiles. Based on the
523 # the matcher to hit standins instead of largefiles. Based on the
524 # resulting standins update the largefiles. Then return the standins
524 # resulting standins update the largefiles. Then return the standins
525 # to their proper state
525 # to their proper state
526 def override_revert(orig, ui, repo, *pats, **opts):
526 def override_revert(orig, ui, repo, *pats, **opts):
527 # Because we put the standins in a bad state (by updating them)
527 # Because we put the standins in a bad state (by updating them)
528 # and then return them to a correct state we need to lock to
528 # and then return them to a correct state we need to lock to
529 # prevent others from changing them in their incorrect state.
529 # prevent others from changing them in their incorrect state.
530 wlock = repo.wlock()
530 wlock = repo.wlock()
531 try:
531 try:
532 lfdirstate = lfutil.openlfdirstate(ui, repo)
532 lfdirstate = lfutil.openlfdirstate(ui, repo)
533 (modified, added, removed, missing, unknown, ignored, clean) = \
533 (modified, added, removed, missing, unknown, ignored, clean) = \
534 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
534 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
535 for lfile in modified:
535 for lfile in modified:
536 lfutil.updatestandin(repo, lfutil.standin(lfile))
536 lfutil.updatestandin(repo, lfutil.standin(lfile))
537
537
538 try:
538 try:
539 ctx = repo[opts.get('rev')]
539 ctx = repo[opts.get('rev')]
540 oldmatch = None # for the closure
540 oldmatch = None # for the closure
541 def override_match(ctx, pats=[], opts={}, globbed=False,
541 def override_match(ctx, pats=[], opts={}, globbed=False,
542 default='relpath'):
542 default='relpath'):
543 match = oldmatch(ctx, pats, opts, globbed, default)
543 match = oldmatch(ctx, pats, opts, globbed, default)
544 m = copy.copy(match)
544 m = copy.copy(match)
545 def tostandin(f):
545 def tostandin(f):
546 if lfutil.standin(f) in ctx or lfutil.standin(f) in ctx:
546 if lfutil.standin(f) in ctx or lfutil.standin(f) in ctx:
547 return lfutil.standin(f)
547 return lfutil.standin(f)
548 elif lfutil.standin(f) in repo[None]:
548 elif lfutil.standin(f) in repo[None]:
549 return None
549 return None
550 return f
550 return f
551 m._files = [tostandin(f) for f in m._files]
551 m._files = [tostandin(f) for f in m._files]
552 m._files = [f for f in m._files if f is not None]
552 m._files = [f for f in m._files if f is not None]
553 m._fmap = set(m._files)
553 m._fmap = set(m._files)
554 orig_matchfn = m.matchfn
554 orig_matchfn = m.matchfn
555 def matchfn(f):
555 def matchfn(f):
556 if lfutil.isstandin(f):
556 if lfutil.isstandin(f):
557 # We need to keep track of what largefiles are being
557 # We need to keep track of what largefiles are being
558 # matched so we know which ones to update later --
558 # matched so we know which ones to update later --
559 # otherwise we accidentally revert changes to other
559 # otherwise we accidentally revert changes to other
560 # largefiles. This is repo-specific, so duckpunch the
560 # largefiles. This is repo-specific, so duckpunch the
561 # repo object to keep the list of largefiles for us
561 # repo object to keep the list of largefiles for us
562 # later.
562 # later.
563 if orig_matchfn(lfutil.splitstandin(f)) and \
563 if orig_matchfn(lfutil.splitstandin(f)) and \
564 (f in repo[None] or f in ctx):
564 (f in repo[None] or f in ctx):
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 lfileslist.append(lfutil.splitstandin(f))
566 lfileslist.append(lfutil.splitstandin(f))
567 repo._lfilestoupdate = lfileslist
567 repo._lfilestoupdate = lfileslist
568 return True
568 return True
569 else:
569 else:
570 return False
570 return False
571 return orig_matchfn(f)
571 return orig_matchfn(f)
572 m.matchfn = matchfn
572 m.matchfn = matchfn
573 return m
573 return m
574 oldmatch = installmatchfn(override_match)
574 oldmatch = installmatchfn(override_match)
575 scmutil.match
575 scmutil.match
576 matches = override_match(repo[None], pats, opts)
576 matches = override_match(repo[None], pats, opts)
577 orig(ui, repo, *pats, **opts)
577 orig(ui, repo, *pats, **opts)
578 finally:
578 finally:
579 restorematchfn()
579 restorematchfn()
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 printmessage=False)
582 printmessage=False)
583
583
584 # empty out the largefiles list so we start fresh next time
584 # empty out the largefiles list so we start fresh next time
585 repo._lfilestoupdate = []
585 repo._lfilestoupdate = []
586 for lfile in modified:
586 for lfile in modified:
587 if lfile in lfileslist:
587 if lfile in lfileslist:
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 in repo['.']:
589 in repo['.']:
590 lfutil.writestandin(repo, lfutil.standin(lfile),
590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 repo['.'][lfile].data().strip(),
591 repo['.'][lfile].data().strip(),
592 'x' in repo['.'][lfile].flags())
592 'x' in repo['.'][lfile].flags())
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 for lfile in added:
594 for lfile in added:
595 standin = lfutil.standin(lfile)
595 standin = lfutil.standin(lfile)
596 if standin not in ctx and (standin in matches or opts.get('all')):
596 if standin not in ctx and (standin in matches or opts.get('all')):
597 if lfile in lfdirstate:
597 if lfile in lfdirstate:
598 lfdirstate.drop(lfile)
598 lfdirstate.drop(lfile)
599 util.unlinkpath(repo.wjoin(standin))
599 util.unlinkpath(repo.wjoin(standin))
600 lfdirstate.write()
600 lfdirstate.write()
601 finally:
601 finally:
602 wlock.release()
602 wlock.release()
603
603
604 def hg_update(orig, repo, node):
604 def hg_update(orig, repo, node):
605 result = orig(repo, node)
605 result = orig(repo, node)
606 lfcommands.updatelfiles(repo.ui, repo)
606 lfcommands.updatelfiles(repo.ui, repo)
607 return result
607 return result
608
608
609 def hg_clean(orig, repo, node, show_stats=True):
609 def hg_clean(orig, repo, node, show_stats=True):
610 result = orig(repo, node, show_stats)
610 result = orig(repo, node, show_stats)
611 lfcommands.updatelfiles(repo.ui, repo)
611 lfcommands.updatelfiles(repo.ui, repo)
612 return result
612 return result
613
613
614 def hg_merge(orig, repo, node, force=None, remind=True):
614 def hg_merge(orig, repo, node, force=None, remind=True):
615 result = orig(repo, node, force, remind)
615 result = orig(repo, node, force, remind)
616 lfcommands.updatelfiles(repo.ui, repo)
616 lfcommands.updatelfiles(repo.ui, repo)
617 return result
617 return result
618
618
619 # When we rebase a repository with remotely changed largefiles, we need to
619 # When we rebase a repository with remotely changed largefiles, we need to
620 # take some extra care so that the largefiles are correctly updated in the
620 # take some extra care so that the largefiles are correctly updated in the
621 # working copy
621 # working copy
622 def override_pull(orig, ui, repo, source=None, **opts):
622 def override_pull(orig, ui, repo, source=None, **opts):
623 if opts.get('rebase', False):
623 if opts.get('rebase', False):
624 repo._isrebasing = True
624 repo._isrebasing = True
625 try:
625 try:
626 if opts.get('update'):
626 if opts.get('update'):
627 del opts['update']
627 del opts['update']
628 ui.debug('--update and --rebase are not compatible, ignoring '
628 ui.debug('--update and --rebase are not compatible, ignoring '
629 'the update flag\n')
629 'the update flag\n')
630 del opts['rebase']
630 del opts['rebase']
631 cmdutil.bailifchanged(repo)
631 cmdutil.bailifchanged(repo)
632 revsprepull = len(repo)
632 revsprepull = len(repo)
633 origpostincoming = commands.postincoming
633 origpostincoming = commands.postincoming
634 def _dummy(*args, **kwargs):
634 def _dummy(*args, **kwargs):
635 pass
635 pass
636 commands.postincoming = _dummy
636 commands.postincoming = _dummy
637 repo.lfpullsource = source
637 repo.lfpullsource = source
638 if not source:
638 if not source:
639 source = 'default'
639 source = 'default'
640 try:
640 try:
641 result = commands.pull(ui, repo, source, **opts)
641 result = commands.pull(ui, repo, source, **opts)
642 finally:
642 finally:
643 commands.postincoming = origpostincoming
643 commands.postincoming = origpostincoming
644 revspostpull = len(repo)
644 revspostpull = len(repo)
645 if revspostpull > revsprepull:
645 if revspostpull > revsprepull:
646 result = result or rebase.rebase(ui, repo)
646 result = result or rebase.rebase(ui, repo)
647 finally:
647 finally:
648 repo._isrebasing = False
648 repo._isrebasing = False
649 else:
649 else:
650 repo.lfpullsource = source
650 repo.lfpullsource = source
651 if not source:
651 if not source:
652 source = 'default'
652 source = 'default'
653 result = orig(ui, repo, source, **opts)
653 result = orig(ui, repo, source, **opts)
654 return result
654 return result
655
655
656 def override_rebase(orig, ui, repo, **opts):
656 def override_rebase(orig, ui, repo, **opts):
657 repo._isrebasing = True
657 repo._isrebasing = True
658 try:
658 try:
659 orig(ui, repo, **opts)
659 orig(ui, repo, **opts)
660 finally:
660 finally:
661 repo._isrebasing = False
661 repo._isrebasing = False
662
662
663 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
663 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
664 prefix=None, mtime=None, subrepos=None):
664 prefix=None, mtime=None, subrepos=None):
665 # No need to lock because we are only reading history and
665 # No need to lock because we are only reading history and
666 # largefile caches, neither of which are modified.
666 # largefile caches, neither of which are modified.
667 lfcommands.cachelfiles(repo.ui, repo, node)
667 lfcommands.cachelfiles(repo.ui, repo, node)
668
668
669 if kind not in archival.archivers:
669 if kind not in archival.archivers:
670 raise util.Abort(_("unknown archive type '%s'") % kind)
670 raise util.Abort(_("unknown archive type '%s'") % kind)
671
671
672 ctx = repo[node]
672 ctx = repo[node]
673
673
674 if kind == 'files':
674 if kind == 'files':
675 if prefix:
675 if prefix:
676 raise util.Abort(
676 raise util.Abort(
677 _('cannot give prefix when archiving to files'))
677 _('cannot give prefix when archiving to files'))
678 else:
678 else:
679 prefix = archival.tidyprefix(dest, kind, prefix)
679 prefix = archival.tidyprefix(dest, kind, prefix)
680
680
681 def write(name, mode, islink, getdata):
681 def write(name, mode, islink, getdata):
682 if matchfn and not matchfn(name):
682 if matchfn and not matchfn(name):
683 return
683 return
684 data = getdata()
684 data = getdata()
685 if decode:
685 if decode:
686 data = repo.wwritedata(name, data)
686 data = repo.wwritedata(name, data)
687 archiver.addfile(prefix + name, mode, islink, data)
687 archiver.addfile(prefix + name, mode, islink, data)
688
688
689 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
689 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
690
690
691 if repo.ui.configbool("ui", "archivemeta", True):
691 if repo.ui.configbool("ui", "archivemeta", True):
692 def metadata():
692 def metadata():
693 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
693 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
694 hex(repo.changelog.node(0)), hex(node), ctx.branch())
694 hex(repo.changelog.node(0)), hex(node), ctx.branch())
695
695
696 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
696 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
697 if repo.tagtype(t) == 'global')
697 if repo.tagtype(t) == 'global')
698 if not tags:
698 if not tags:
699 repo.ui.pushbuffer()
699 repo.ui.pushbuffer()
700 opts = {'template': '{latesttag}\n{latesttagdistance}',
700 opts = {'template': '{latesttag}\n{latesttagdistance}',
701 'style': '', 'patch': None, 'git': None}
701 'style': '', 'patch': None, 'git': None}
702 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
702 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
703 ltags, dist = repo.ui.popbuffer().split('\n')
703 ltags, dist = repo.ui.popbuffer().split('\n')
704 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
704 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
705 tags += 'latesttagdistance: %s\n' % dist
705 tags += 'latesttagdistance: %s\n' % dist
706
706
707 return base + tags
707 return base + tags
708
708
709 write('.hg_archival.txt', 0644, False, metadata)
709 write('.hg_archival.txt', 0644, False, metadata)
710
710
711 for f in ctx:
711 for f in ctx:
712 ff = ctx.flags(f)
712 ff = ctx.flags(f)
713 getdata = ctx[f].data
713 getdata = ctx[f].data
714 if lfutil.isstandin(f):
714 if lfutil.isstandin(f):
715 path = lfutil.findfile(repo, getdata().strip())
715 path = lfutil.findfile(repo, getdata().strip())
716 f = lfutil.splitstandin(f)
716 f = lfutil.splitstandin(f)
717
717
718 def getdatafn():
718 def getdatafn():
719 fd = None
719 fd = None
720 try:
720 try:
721 fd = open(path, 'rb')
721 fd = open(path, 'rb')
722 return fd.read()
722 return fd.read()
723 finally:
723 finally:
724 if fd:
724 if fd:
725 fd.close()
725 fd.close()
726
726
727 getdata = getdatafn
727 getdata = getdatafn
728 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
728 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
729
729
730 if subrepos:
730 if subrepos:
731 for subpath in ctx.substate:
731 for subpath in ctx.substate:
732 sub = ctx.sub(subpath)
732 sub = ctx.sub(subpath)
733 sub.archive(repo.ui, archiver, prefix)
733 sub.archive(repo.ui, archiver, prefix)
734
734
735 archiver.done()
735 archiver.done()
736
736
737 # If a largefile is modified, the change is not reflected in its
737 # If a largefile is modified, the change is not reflected in its
738 # standin until a commit. cmdutil.bailifchanged() raises an exception
738 # standin until a commit. cmdutil.bailifchanged() raises an exception
739 # if the repo has uncommitted changes. Wrap it to also check if
739 # if the repo has uncommitted changes. Wrap it to also check if
740 # largefiles were changed. This is used by bisect and backout.
740 # largefiles were changed. This is used by bisect and backout.
741 def override_bailifchanged(orig, repo):
741 def override_bailifchanged(orig, repo):
742 orig(repo)
742 orig(repo)
743 repo.lfstatus = True
743 repo.lfstatus = True
744 modified, added, removed, deleted = repo.status()[:4]
744 modified, added, removed, deleted = repo.status()[:4]
745 repo.lfstatus = False
745 repo.lfstatus = False
746 if modified or added or removed or deleted:
746 if modified or added or removed or deleted:
747 raise util.Abort(_('outstanding uncommitted changes'))
747 raise util.Abort(_('outstanding uncommitted changes'))
748
748
749 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
749 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
750 def override_fetch(orig, ui, repo, *pats, **opts):
750 def override_fetch(orig, ui, repo, *pats, **opts):
751 repo.lfstatus = True
751 repo.lfstatus = True
752 modified, added, removed, deleted = repo.status()[:4]
752 modified, added, removed, deleted = repo.status()[:4]
753 repo.lfstatus = False
753 repo.lfstatus = False
754 if modified or added or removed or deleted:
754 if modified or added or removed or deleted:
755 raise util.Abort(_('outstanding uncommitted changes'))
755 raise util.Abort(_('outstanding uncommitted changes'))
756 return orig(ui, repo, *pats, **opts)
756 return orig(ui, repo, *pats, **opts)
757
757
758 def override_forget(orig, ui, repo, *pats, **opts):
758 def override_forget(orig, ui, repo, *pats, **opts):
759 installnormalfilesmatchfn(repo[None].manifest())
759 installnormalfilesmatchfn(repo[None].manifest())
760 orig(ui, repo, *pats, **opts)
760 orig(ui, repo, *pats, **opts)
761 restorematchfn()
761 restorematchfn()
762 m = scmutil.match(repo[None], pats, opts)
762 m = scmutil.match(repo[None], pats, opts)
763
763
764 try:
764 try:
765 repo.lfstatus = True
765 repo.lfstatus = True
766 s = repo.status(match=m, clean=True)
766 s = repo.status(match=m, clean=True)
767 finally:
767 finally:
768 repo.lfstatus = False
768 repo.lfstatus = False
769 forget = sorted(s[0] + s[1] + s[3] + s[6])
769 forget = sorted(s[0] + s[1] + s[3] + s[6])
770 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
770 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
771
771
772 for f in forget:
772 for f in forget:
773 if lfutil.standin(f) not in repo.dirstate and not \
773 if lfutil.standin(f) not in repo.dirstate and not \
774 os.path.isdir(m.rel(lfutil.standin(f))):
774 os.path.isdir(m.rel(lfutil.standin(f))):
775 ui.warn(_('not removing %s: file is already untracked\n')
775 ui.warn(_('not removing %s: file is already untracked\n')
776 % m.rel(f))
776 % m.rel(f))
777
777
778 for f in forget:
778 for f in forget:
779 if ui.verbose or not m.exact(f):
779 if ui.verbose or not m.exact(f):
780 ui.status(_('removing %s\n') % m.rel(f))
780 ui.status(_('removing %s\n') % m.rel(f))
781
781
782 # Need to lock because standin files are deleted then removed from the
782 # Need to lock because standin files are deleted then removed from the
783 # repository and we could race inbetween.
783 # repository and we could race inbetween.
784 wlock = repo.wlock()
784 wlock = repo.wlock()
785 try:
785 try:
786 lfdirstate = lfutil.openlfdirstate(ui, repo)
786 lfdirstate = lfutil.openlfdirstate(ui, repo)
787 for f in forget:
787 for f in forget:
788 if lfdirstate[f] == 'a':
788 if lfdirstate[f] == 'a':
789 lfdirstate.drop(f)
789 lfdirstate.drop(f)
790 else:
790 else:
791 lfdirstate.remove(f)
791 lfdirstate.remove(f)
792 lfdirstate.write()
792 lfdirstate.write()
793 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
793 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
794 unlink=True)
794 unlink=True)
795 finally:
795 finally:
796 wlock.release()
796 wlock.release()
797
797
798 def getoutgoinglfiles(ui, repo, dest=None, **opts):
798 def getoutgoinglfiles(ui, repo, dest=None, **opts):
799 dest = ui.expandpath(dest or 'default-push', dest or 'default')
799 dest = ui.expandpath(dest or 'default-push', dest or 'default')
800 dest, branches = hg.parseurl(dest, opts.get('branch'))
800 dest, branches = hg.parseurl(dest, opts.get('branch'))
801 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
801 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
802 if revs:
802 if revs:
803 revs = [repo.lookup(rev) for rev in revs]
803 revs = [repo.lookup(rev) for rev in revs]
804
804
805 remoteui = hg.remoteui
805 remoteui = hg.remoteui
806
806
807 try:
807 try:
808 remote = hg.repository(remoteui(repo, opts), dest)
808 remote = hg.repository(remoteui(repo, opts), dest)
809 except error.RepoError:
809 except error.RepoError:
810 return None
810 return None
811 o = lfutil.findoutgoing(repo, remote, False)
811 o = lfutil.findoutgoing(repo, remote, False)
812 if not o:
812 if not o:
813 return None
813 return None
814 o = repo.changelog.nodesbetween(o, revs)[0]
814 o = repo.changelog.nodesbetween(o, revs)[0]
815 if opts.get('newest_first'):
815 if opts.get('newest_first'):
816 o.reverse()
816 o.reverse()
817
817
818 toupload = set()
818 toupload = set()
819 for n in o:
819 for n in o:
820 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
820 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
821 ctx = repo[n]
821 ctx = repo[n]
822 files = set(ctx.files())
822 files = set(ctx.files())
823 if len(parents) == 2:
823 if len(parents) == 2:
824 mc = ctx.manifest()
824 mc = ctx.manifest()
825 mp1 = ctx.parents()[0].manifest()
825 mp1 = ctx.parents()[0].manifest()
826 mp2 = ctx.parents()[1].manifest()
826 mp2 = ctx.parents()[1].manifest()
827 for f in mp1:
827 for f in mp1:
828 if f not in mc:
828 if f not in mc:
829 files.add(f)
829 files.add(f)
830 for f in mp2:
830 for f in mp2:
831 if f not in mc:
831 if f not in mc:
832 files.add(f)
832 files.add(f)
833 for f in mc:
833 for f in mc:
834 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
834 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
835 files.add(f)
835 files.add(f)
836 toupload = toupload.union(
836 toupload = toupload.union(
837 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
837 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
838 return toupload
838 return toupload
839
839
840 def override_outgoing(orig, ui, repo, dest=None, **opts):
840 def override_outgoing(orig, ui, repo, dest=None, **opts):
841 orig(ui, repo, dest, **opts)
841 orig(ui, repo, dest, **opts)
842
842
843 if opts.pop('large', None):
843 if opts.pop('large', None):
844 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
844 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
845 if toupload is None:
845 if toupload is None:
846 ui.status(_('largefiles: No remote repo\n'))
846 ui.status(_('largefiles: No remote repo\n'))
847 else:
847 else:
848 ui.status(_('largefiles to upload:\n'))
848 ui.status(_('largefiles to upload:\n'))
849 for file in toupload:
849 for file in toupload:
850 ui.status(lfutil.splitstandin(file) + '\n')
850 ui.status(lfutil.splitstandin(file) + '\n')
851 ui.status('\n')
851 ui.status('\n')
852
852
853 def override_summary(orig, ui, repo, *pats, **opts):
853 def override_summary(orig, ui, repo, *pats, **opts):
854 try:
854 try:
855 repo.lfstatus = True
855 repo.lfstatus = True
856 orig(ui, repo, *pats, **opts)
856 orig(ui, repo, *pats, **opts)
857 finally:
857 finally:
858 repo.lfstatus = False
858 repo.lfstatus = False
859
859
860 if opts.pop('large', None):
860 if opts.pop('large', None):
861 toupload = getoutgoinglfiles(ui, repo, None, **opts)
861 toupload = getoutgoinglfiles(ui, repo, None, **opts)
862 if toupload is None:
862 if toupload is None:
863 ui.status(_('largefiles: No remote repo\n'))
863 ui.status(_('largefiles: No remote repo\n'))
864 else:
864 else:
865 ui.status(_('largefiles: %d to upload\n') % len(toupload))
865 ui.status(_('largefiles: %d to upload\n') % len(toupload))
866
866
867 def override_addremove(orig, ui, repo, *pats, **opts):
867 def override_addremove(orig, ui, repo, *pats, **opts):
868 # Get the list of missing largefiles so we can remove them
868 # Get the list of missing largefiles so we can remove them
869 lfdirstate = lfutil.openlfdirstate(ui, repo)
869 lfdirstate = lfutil.openlfdirstate(ui, repo)
870 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
870 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
871 False, False)
871 False, False)
872 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
872 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
873
873
874 # Call into the normal remove code, but the removing of the standin, we want
874 # Call into the normal remove code, but the removing of the standin, we want
875 # to have handled by original addremove. Monkey patching here makes sure
875 # to have handled by original addremove. Monkey patching here makes sure
876 # we don't remove the standin in the largefiles code, preventing a very
876 # we don't remove the standin in the largefiles code, preventing a very
877 # confused state later.
877 # confused state later.
878 repo._isaddremove = True
878 repo._isaddremove = True
879 remove_largefiles(ui, repo, *missing, **opts)
879 remove_largefiles(ui, repo, *missing, **opts)
880 repo._isaddremove = False
880 repo._isaddremove = False
881 # Call into the normal add code, and any files that *should* be added as
881 # Call into the normal add code, and any files that *should* be added as
882 # largefiles will be
882 # largefiles will be
883 add_largefiles(ui, repo, *pats, **opts)
883 add_largefiles(ui, repo, *pats, **opts)
884 # Now that we've handled largefiles, hand off to the original addremove
884 # Now that we've handled largefiles, hand off to the original addremove
885 # function to take care of the rest. Make sure it doesn't do anything with
885 # function to take care of the rest. Make sure it doesn't do anything with
886 # largefiles by installing a matcher that will ignore them.
886 # largefiles by installing a matcher that will ignore them.
887 installnormalfilesmatchfn(repo[None].manifest())
887 installnormalfilesmatchfn(repo[None].manifest())
888 result = orig(ui, repo, *pats, **opts)
888 result = orig(ui, repo, *pats, **opts)
889 restorematchfn()
889 restorematchfn()
890 return result
890 return result
891
891
892 # Calling purge with --all will cause the largefiles to be deleted.
892 # Calling purge with --all will cause the largefiles to be deleted.
893 # Override repo.status to prevent this from happening.
893 # Override repo.status to prevent this from happening.
894 def override_purge(orig, ui, repo, *dirs, **opts):
894 def override_purge(orig, ui, repo, *dirs, **opts):
895 oldstatus = repo.status
895 oldstatus = repo.status
896 def override_status(node1='.', node2=None, match=None, ignored=False,
896 def override_status(node1='.', node2=None, match=None, ignored=False,
897 clean=False, unknown=False, listsubrepos=False):
897 clean=False, unknown=False, listsubrepos=False):
898 r = oldstatus(node1, node2, match, ignored, clean, unknown,
898 r = oldstatus(node1, node2, match, ignored, clean, unknown,
899 listsubrepos)
899 listsubrepos)
900 lfdirstate = lfutil.openlfdirstate(ui, repo)
900 lfdirstate = lfutil.openlfdirstate(ui, repo)
901 modified, added, removed, deleted, unknown, ignored, clean = r
901 modified, added, removed, deleted, unknown, ignored, clean = r
902 unknown = [f for f in unknown if lfdirstate[f] == '?']
902 unknown = [f for f in unknown if lfdirstate[f] == '?']
903 ignored = [f for f in ignored if lfdirstate[f] == '?']
903 ignored = [f for f in ignored if lfdirstate[f] == '?']
904 return modified, added, removed, deleted, unknown, ignored, clean
904 return modified, added, removed, deleted, unknown, ignored, clean
905 repo.status = override_status
905 repo.status = override_status
906 orig(ui, repo, *dirs, **opts)
906 orig(ui, repo, *dirs, **opts)
907 repo.status = oldstatus
907 repo.status = oldstatus
908
908
909 def override_rollback(orig, ui, repo, **opts):
909 def override_rollback(orig, ui, repo, **opts):
910 result = orig(ui, repo, **opts)
910 result = orig(ui, repo, **opts)
911 merge.update(repo, node=None, branchmerge=False, force=True,
911 merge.update(repo, node=None, branchmerge=False, force=True,
912 partial=lfutil.isstandin)
912 partial=lfutil.isstandin)
913 wlock = repo.wlock()
914 try:
913 lfdirstate = lfutil.openlfdirstate(ui, repo)
915 lfdirstate = lfutil.openlfdirstate(ui, repo)
914 lfiles = lfutil.listlfiles(repo)
916 lfiles = lfutil.listlfiles(repo)
915 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
917 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
916 for file in lfiles:
918 for file in lfiles:
917 if file in oldlfiles:
919 if file in oldlfiles:
918 lfdirstate.normallookup(file)
920 lfdirstate.normallookup(file)
919 else:
921 else:
920 lfdirstate.add(file)
922 lfdirstate.add(file)
921 lfdirstate.write()
923 lfdirstate.write()
924 finally:
925 wlock.release()
922 return result
926 return result
923
927
924 def override_transplant(orig, ui, repo, *revs, **opts):
928 def override_transplant(orig, ui, repo, *revs, **opts):
925 result = orig(ui, repo, *revs, **opts)
929 result = orig(ui, repo, *revs, **opts)
926 lfcommands.updatelfiles(repo.ui, repo)
930 lfcommands.updatelfiles(repo.ui, repo)
927 return result
931 return result
@@ -1,450 +1,451 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13
13
14 from mercurial import context, error, manifest, match as match_, util
14 from mercurial import context, error, manifest, match as match_, util
15 from mercurial import node as node_
15 from mercurial import node as node_
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfcommands
18 import lfcommands
19 import proto
19 import proto
20 import lfutil
20 import lfutil
21
21
22 def reposetup(ui, repo):
22 def reposetup(ui, repo):
23 # wire repositories should be given new wireproto functions but not the
23 # wire repositories should be given new wireproto functions but not the
24 # other largefiles modifications
24 # other largefiles modifications
25 if not repo.local():
25 if not repo.local():
26 return proto.wirereposetup(ui, repo)
26 return proto.wirereposetup(ui, repo)
27
27
28 for name in ('status', 'commitctx', 'commit', 'push'):
28 for name in ('status', 'commitctx', 'commit', 'push'):
29 method = getattr(repo, name)
29 method = getattr(repo, name)
30 if (isinstance(method, types.FunctionType) and
30 if (isinstance(method, types.FunctionType) and
31 method.func_name == 'wrap'):
31 method.func_name == 'wrap'):
32 ui.warn(_('largefiles: repo method %r appears to have already been'
32 ui.warn(_('largefiles: repo method %r appears to have already been'
33 ' wrapped by another extension: '
33 ' wrapped by another extension: '
34 'largefiles may behave incorrectly\n')
34 'largefiles may behave incorrectly\n')
35 % name)
35 % name)
36
36
37 class lfiles_repo(repo.__class__):
37 class lfiles_repo(repo.__class__):
38 lfstatus = False
38 lfstatus = False
39 def status_nolfiles(self, *args, **kwargs):
39 def status_nolfiles(self, *args, **kwargs):
40 return super(lfiles_repo, self).status(*args, **kwargs)
40 return super(lfiles_repo, self).status(*args, **kwargs)
41
41
42 # When lfstatus is set, return a context that gives the names
42 # When lfstatus is set, return a context that gives the names
43 # of largefiles instead of their corresponding standins and
43 # of largefiles instead of their corresponding standins and
44 # identifies the largefiles as always binary, regardless of
44 # identifies the largefiles as always binary, regardless of
45 # their actual contents.
45 # their actual contents.
46 def __getitem__(self, changeid):
46 def __getitem__(self, changeid):
47 ctx = super(lfiles_repo, self).__getitem__(changeid)
47 ctx = super(lfiles_repo, self).__getitem__(changeid)
48 if self.lfstatus:
48 if self.lfstatus:
49 class lfiles_manifestdict(manifest.manifestdict):
49 class lfiles_manifestdict(manifest.manifestdict):
50 def __contains__(self, filename):
50 def __contains__(self, filename):
51 if super(lfiles_manifestdict,
51 if super(lfiles_manifestdict,
52 self).__contains__(filename):
52 self).__contains__(filename):
53 return True
53 return True
54 return super(lfiles_manifestdict,
54 return super(lfiles_manifestdict,
55 self).__contains__(lfutil.standin(filename))
55 self).__contains__(lfutil.standin(filename))
56 class lfiles_ctx(ctx.__class__):
56 class lfiles_ctx(ctx.__class__):
57 def files(self):
57 def files(self):
58 filenames = super(lfiles_ctx, self).files()
58 filenames = super(lfiles_ctx, self).files()
59 return [lfutil.splitstandin(f) or f for f in filenames]
59 return [lfutil.splitstandin(f) or f for f in filenames]
60 def manifest(self):
60 def manifest(self):
61 man1 = super(lfiles_ctx, self).manifest()
61 man1 = super(lfiles_ctx, self).manifest()
62 man1.__class__ = lfiles_manifestdict
62 man1.__class__ = lfiles_manifestdict
63 return man1
63 return man1
64 def filectx(self, path, fileid=None, filelog=None):
64 def filectx(self, path, fileid=None, filelog=None):
65 try:
65 try:
66 result = super(lfiles_ctx, self).filectx(path,
66 result = super(lfiles_ctx, self).filectx(path,
67 fileid, filelog)
67 fileid, filelog)
68 except error.LookupError:
68 except error.LookupError:
69 # Adding a null character will cause Mercurial to
69 # Adding a null character will cause Mercurial to
70 # identify this as a binary file.
70 # identify this as a binary file.
71 result = super(lfiles_ctx, self).filectx(
71 result = super(lfiles_ctx, self).filectx(
72 lfutil.standin(path), fileid, filelog)
72 lfutil.standin(path), fileid, filelog)
73 olddata = result.data
73 olddata = result.data
74 result.data = lambda: olddata() + '\0'
74 result.data = lambda: olddata() + '\0'
75 return result
75 return result
76 ctx.__class__ = lfiles_ctx
76 ctx.__class__ = lfiles_ctx
77 return ctx
77 return ctx
78
78
79 # Figure out the status of big files and insert them into the
79 # Figure out the status of big files and insert them into the
80 # appropriate list in the result. Also removes standin files
80 # appropriate list in the result. Also removes standin files
81 # from the listing. Revert to the original status if
81 # from the listing. Revert to the original status if
82 # self.lfstatus is False.
82 # self.lfstatus is False.
83 def status(self, node1='.', node2=None, match=None, ignored=False,
83 def status(self, node1='.', node2=None, match=None, ignored=False,
84 clean=False, unknown=False, listsubrepos=False):
84 clean=False, unknown=False, listsubrepos=False):
85 listignored, listclean, listunknown = ignored, clean, unknown
85 listignored, listclean, listunknown = ignored, clean, unknown
86 if not self.lfstatus:
86 if not self.lfstatus:
87 return super(lfiles_repo, self).status(node1, node2, match,
87 return super(lfiles_repo, self).status(node1, node2, match,
88 listignored, listclean, listunknown, listsubrepos)
88 listignored, listclean, listunknown, listsubrepos)
89 else:
89 else:
90 # some calls in this function rely on the old version of status
90 # some calls in this function rely on the old version of status
91 self.lfstatus = False
91 self.lfstatus = False
92 if isinstance(node1, context.changectx):
92 if isinstance(node1, context.changectx):
93 ctx1 = node1
93 ctx1 = node1
94 else:
94 else:
95 ctx1 = repo[node1]
95 ctx1 = repo[node1]
96 if isinstance(node2, context.changectx):
96 if isinstance(node2, context.changectx):
97 ctx2 = node2
97 ctx2 = node2
98 else:
98 else:
99 ctx2 = repo[node2]
99 ctx2 = repo[node2]
100 working = ctx2.rev() is None
100 working = ctx2.rev() is None
101 parentworking = working and ctx1 == self['.']
101 parentworking = working and ctx1 == self['.']
102
102
103 def inctx(file, ctx):
103 def inctx(file, ctx):
104 try:
104 try:
105 if ctx.rev() is None:
105 if ctx.rev() is None:
106 return file in ctx.manifest()
106 return file in ctx.manifest()
107 ctx[file]
107 ctx[file]
108 return True
108 return True
109 except KeyError:
109 except KeyError:
110 return False
110 return False
111
111
112 if match is None:
112 if match is None:
113 match = match_.always(self.root, self.getcwd())
113 match = match_.always(self.root, self.getcwd())
114
114
115 # First check if there were files specified on the
115 # First check if there were files specified on the
116 # command line. If there were, and none of them were
116 # command line. If there were, and none of them were
117 # largefiles, we should just bail here and let super
117 # largefiles, we should just bail here and let super
118 # handle it -- thus gaining a big performance boost.
118 # handle it -- thus gaining a big performance boost.
119 lfdirstate = lfutil.openlfdirstate(ui, self)
119 lfdirstate = lfutil.openlfdirstate(ui, self)
120 if match.files() and not match.anypats():
120 if match.files() and not match.anypats():
121 matchedfiles = [f for f in match.files() if f in lfdirstate]
121 matchedfiles = [f for f in match.files() if f in lfdirstate]
122 if not matchedfiles:
122 if not matchedfiles:
123 return super(lfiles_repo, self).status(node1, node2,
123 return super(lfiles_repo, self).status(node1, node2,
124 match, listignored, listclean,
124 match, listignored, listclean,
125 listunknown, listsubrepos)
125 listunknown, listsubrepos)
126
126
127 # Create a copy of match that matches standins instead
127 # Create a copy of match that matches standins instead
128 # of largefiles.
128 # of largefiles.
129 def tostandin(file):
129 def tostandin(file):
130 if inctx(lfutil.standin(file), ctx2):
130 if inctx(lfutil.standin(file), ctx2):
131 return lfutil.standin(file)
131 return lfutil.standin(file)
132 return file
132 return file
133
133
134 # Create a function that we can use to override what is
134 # Create a function that we can use to override what is
135 # normally the ignore matcher. We've already checked
135 # normally the ignore matcher. We've already checked
136 # for ignored files on the first dirstate walk, and
136 # for ignored files on the first dirstate walk, and
137 # unecessarily re-checking here causes a huge performance
137 # unecessarily re-checking here causes a huge performance
138 # hit because lfdirstate only knows about largefiles
138 # hit because lfdirstate only knows about largefiles
139 def _ignoreoverride(self):
139 def _ignoreoverride(self):
140 return False
140 return False
141
141
142 m = copy.copy(match)
142 m = copy.copy(match)
143 m._files = [tostandin(f) for f in m._files]
143 m._files = [tostandin(f) for f in m._files]
144
144
145 # Get ignored files here even if we weren't asked for them; we
145 # Get ignored files here even if we weren't asked for them; we
146 # must use the result here for filtering later
146 # must use the result here for filtering later
147 result = super(lfiles_repo, self).status(node1, node2, m,
147 result = super(lfiles_repo, self).status(node1, node2, m,
148 True, clean, unknown, listsubrepos)
148 True, clean, unknown, listsubrepos)
149 if working:
149 if working:
150 # hold the wlock while we read largefiles and
151 # update the lfdirstate
152 wlock = repo.wlock()
153 try:
150 try:
154 # Any non-largefiles that were explicitly listed must be
151 # Any non-largefiles that were explicitly listed must be
155 # taken out or lfdirstate.status will report an error.
152 # taken out or lfdirstate.status will report an error.
156 # The status of these files was already computed using
153 # The status of these files was already computed using
157 # super's status.
154 # super's status.
158 # Override lfdirstate's ignore matcher to not do
155 # Override lfdirstate's ignore matcher to not do
159 # anything
156 # anything
160 orig_ignore = lfdirstate._ignore
157 orig_ignore = lfdirstate._ignore
161 lfdirstate._ignore = _ignoreoverride
158 lfdirstate._ignore = _ignoreoverride
162
159
163 match._files = [f for f in match._files if f in
160 match._files = [f for f in match._files if f in
164 lfdirstate]
161 lfdirstate]
165 # Don't waste time getting the ignored and unknown
162 # Don't waste time getting the ignored and unknown
166 # files again; we already have them
163 # files again; we already have them
167 s = lfdirstate.status(match, [], False,
164 s = lfdirstate.status(match, [], False,
168 listclean, False)
165 listclean, False)
169 (unsure, modified, added, removed, missing, unknown,
166 (unsure, modified, added, removed, missing, unknown,
170 ignored, clean) = s
167 ignored, clean) = s
171 # Replace the list of ignored and unknown files with
168 # Replace the list of ignored and unknown files with
172 # the previously caclulated lists, and strip out the
169 # the previously caclulated lists, and strip out the
173 # largefiles
170 # largefiles
174 lfiles = set(lfdirstate._map)
171 lfiles = set(lfdirstate._map)
175 ignored = set(result[5]).difference(lfiles)
172 ignored = set(result[5]).difference(lfiles)
176 unknown = set(result[4]).difference(lfiles)
173 unknown = set(result[4]).difference(lfiles)
177 if parentworking:
174 if parentworking:
178 for lfile in unsure:
175 for lfile in unsure:
179 standin = lfutil.standin(lfile)
176 standin = lfutil.standin(lfile)
180 if standin not in ctx1:
177 if standin not in ctx1:
181 # from second parent
178 # from second parent
182 modified.append(lfile)
179 modified.append(lfile)
183 elif ctx1[standin].data().strip() \
180 elif ctx1[standin].data().strip() \
184 != lfutil.hashfile(self.wjoin(lfile)):
181 != lfutil.hashfile(self.wjoin(lfile)):
185 modified.append(lfile)
182 modified.append(lfile)
186 else:
183 else:
187 clean.append(lfile)
184 clean.append(lfile)
188 lfdirstate.normal(lfile)
185 lfdirstate.normal(lfile)
189 lfdirstate.write()
190 else:
186 else:
191 tocheck = unsure + modified + added + clean
187 tocheck = unsure + modified + added + clean
192 modified, added, clean = [], [], []
188 modified, added, clean = [], [], []
193
189
194 for lfile in tocheck:
190 for lfile in tocheck:
195 standin = lfutil.standin(lfile)
191 standin = lfutil.standin(lfile)
196 if inctx(standin, ctx1):
192 if inctx(standin, ctx1):
197 if ctx1[standin].data().strip() != \
193 if ctx1[standin].data().strip() != \
198 lfutil.hashfile(self.wjoin(lfile)):
194 lfutil.hashfile(self.wjoin(lfile)):
199 modified.append(lfile)
195 modified.append(lfile)
200 else:
196 else:
201 clean.append(lfile)
197 clean.append(lfile)
202 else:
198 else:
203 added.append(lfile)
199 added.append(lfile)
200 finally:
204 # Replace the original ignore function
201 # Replace the original ignore function
205 lfdirstate._ignore = orig_ignore
202 lfdirstate._ignore = orig_ignore
206 finally:
207 wlock.release()
208
203
209 for standin in ctx1.manifest():
204 for standin in ctx1.manifest():
210 if not lfutil.isstandin(standin):
205 if not lfutil.isstandin(standin):
211 continue
206 continue
212 lfile = lfutil.splitstandin(standin)
207 lfile = lfutil.splitstandin(standin)
213 if not match(lfile):
208 if not match(lfile):
214 continue
209 continue
215 if lfile not in lfdirstate:
210 if lfile not in lfdirstate:
216 removed.append(lfile)
211 removed.append(lfile)
217
212
218 # Filter result lists
213 # Filter result lists
219 result = list(result)
214 result = list(result)
220
215
221 # Largefiles are not really removed when they're
216 # Largefiles are not really removed when they're
222 # still in the normal dirstate. Likewise, normal
217 # still in the normal dirstate. Likewise, normal
223 # files are not really removed if it's still in
218 # files are not really removed if it's still in
224 # lfdirstate. This happens in merges where files
219 # lfdirstate. This happens in merges where files
225 # change type.
220 # change type.
226 removed = [f for f in removed if f not in repo.dirstate]
221 removed = [f for f in removed if f not in repo.dirstate]
227 result[2] = [f for f in result[2] if f not in lfdirstate]
222 result[2] = [f for f in result[2] if f not in lfdirstate]
228
223
229 # Unknown files
224 # Unknown files
230 unknown = set(unknown).difference(ignored)
225 unknown = set(unknown).difference(ignored)
231 result[4] = [f for f in unknown
226 result[4] = [f for f in unknown
232 if (repo.dirstate[f] == '?' and
227 if (repo.dirstate[f] == '?' and
233 not lfutil.isstandin(f))]
228 not lfutil.isstandin(f))]
234 # Ignored files were calculated earlier by the dirstate,
229 # Ignored files were calculated earlier by the dirstate,
235 # and we already stripped out the largefiles from the list
230 # and we already stripped out the largefiles from the list
236 result[5] = ignored
231 result[5] = ignored
237 # combine normal files and largefiles
232 # combine normal files and largefiles
238 normals = [[fn for fn in filelist
233 normals = [[fn for fn in filelist
239 if not lfutil.isstandin(fn)]
234 if not lfutil.isstandin(fn)]
240 for filelist in result]
235 for filelist in result]
241 lfiles = (modified, added, removed, missing, [], [], clean)
236 lfiles = (modified, added, removed, missing, [], [], clean)
242 result = [sorted(list1 + list2)
237 result = [sorted(list1 + list2)
243 for (list1, list2) in zip(normals, lfiles)]
238 for (list1, list2) in zip(normals, lfiles)]
244 else:
239 else:
245 def toname(f):
240 def toname(f):
246 if lfutil.isstandin(f):
241 if lfutil.isstandin(f):
247 return lfutil.splitstandin(f)
242 return lfutil.splitstandin(f)
248 return f
243 return f
249 result = [[toname(f) for f in items] for items in result]
244 result = [[toname(f) for f in items] for items in result]
250
245
251 if not listunknown:
246 if not listunknown:
252 result[4] = []
247 result[4] = []
253 if not listignored:
248 if not listignored:
254 result[5] = []
249 result[5] = []
255 if not listclean:
250 if not listclean:
256 result[6] = []
251 result[6] = []
257 self.lfstatus = True
252 self.lfstatus = True
258 return result
253 return result
259
254
260 # As part of committing, copy all of the largefiles into the
255 # As part of committing, copy all of the largefiles into the
261 # cache.
256 # cache.
262 def commitctx(self, *args, **kwargs):
257 def commitctx(self, *args, **kwargs):
263 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
258 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
264 ctx = self[node]
259 ctx = self[node]
265 for filename in ctx.files():
260 for filename in ctx.files():
266 if lfutil.isstandin(filename) and filename in ctx.manifest():
261 if lfutil.isstandin(filename) and filename in ctx.manifest():
267 realfile = lfutil.splitstandin(filename)
262 realfile = lfutil.splitstandin(filename)
268 lfutil.copytostore(self, ctx.node(), realfile)
263 lfutil.copytostore(self, ctx.node(), realfile)
269
264
270 return node
265 return node
271
266
272 # Before commit, largefile standins have not had their
267 # Before commit, largefile standins have not had their
273 # contents updated to reflect the hash of their largefile.
268 # contents updated to reflect the hash of their largefile.
274 # Do that here.
269 # Do that here.
275 def commit(self, text="", user=None, date=None, match=None,
270 def commit(self, text="", user=None, date=None, match=None,
276 force=False, editor=False, extra={}):
271 force=False, editor=False, extra={}):
277 orig = super(lfiles_repo, self).commit
272 orig = super(lfiles_repo, self).commit
278
273
279 wlock = repo.wlock()
274 wlock = repo.wlock()
280 try:
275 try:
281 # Case 0: Rebase
276 # Case 0: Rebase
282 # We have to take the time to pull down the new largefiles now.
277 # We have to take the time to pull down the new largefiles now.
283 # Otherwise if we are rebasing, any largefiles that were
278 # Otherwise if we are rebasing, any largefiles that were
284 # modified in the destination changesets get overwritten, either
279 # modified in the destination changesets get overwritten, either
285 # by the rebase or in the first commit after the rebase.
280 # by the rebase or in the first commit after the rebase.
286 # updatelfiles will update the dirstate to mark any pulled
281 # updatelfiles will update the dirstate to mark any pulled
287 # largefiles as modified
282 # largefiles as modified
288 if getattr(repo, "_isrebasing", False):
283 if getattr(repo, "_isrebasing", False):
289 lfcommands.updatelfiles(repo.ui, repo)
284 lfcommands.updatelfiles(repo.ui, repo)
290 result = orig(text=text, user=user, date=date, match=match,
285 result = orig(text=text, user=user, date=date, match=match,
291 force=force, editor=editor, extra=extra)
286 force=force, editor=editor, extra=extra)
292 return result
287 return result
293 # Case 1: user calls commit with no specific files or
288 # Case 1: user calls commit with no specific files or
294 # include/exclude patterns: refresh and commit all files that
289 # include/exclude patterns: refresh and commit all files that
295 # are "dirty".
290 # are "dirty".
296 if ((match is None) or
291 if ((match is None) or
297 (not match.anypats() and not match.files())):
292 (not match.anypats() and not match.files())):
298 # Spend a bit of time here to get a list of files we know
293 # Spend a bit of time here to get a list of files we know
299 # are modified so we can compare only against those.
294 # are modified so we can compare only against those.
300 # It can cost a lot of time (several seconds)
295 # It can cost a lot of time (several seconds)
301 # otherwise to update all standins if the largefiles are
296 # otherwise to update all standins if the largefiles are
302 # large.
297 # large.
303 lfdirstate = lfutil.openlfdirstate(ui, self)
298 lfdirstate = lfutil.openlfdirstate(ui, self)
304 dirtymatch = match_.always(repo.root, repo.getcwd())
299 dirtymatch = match_.always(repo.root, repo.getcwd())
305 s = lfdirstate.status(dirtymatch, [], False, False, False)
300 s = lfdirstate.status(dirtymatch, [], False, False, False)
306 modifiedfiles = []
301 modifiedfiles = []
307 for i in s:
302 for i in s:
308 modifiedfiles.extend(i)
303 modifiedfiles.extend(i)
309 lfiles = lfutil.listlfiles(self)
304 lfiles = lfutil.listlfiles(self)
310 # this only loops through largefiles that exist (not
305 # this only loops through largefiles that exist (not
311 # removed/renamed)
306 # removed/renamed)
312 for lfile in lfiles:
307 for lfile in lfiles:
313 if lfile in modifiedfiles:
308 if lfile in modifiedfiles:
314 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
309 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
315 # this handles the case where a rebase is being
310 # this handles the case where a rebase is being
316 # performed and the working copy is not updated
311 # performed and the working copy is not updated
317 # yet.
312 # yet.
318 if os.path.exists(self.wjoin(lfile)):
313 if os.path.exists(self.wjoin(lfile)):
319 lfutil.updatestandin(self,
314 lfutil.updatestandin(self,
320 lfutil.standin(lfile))
315 lfutil.standin(lfile))
321 lfdirstate.normal(lfile)
316 lfdirstate.normal(lfile)
322 for lfile in lfdirstate:
317 for lfile in lfdirstate:
323 if lfile in modifiedfiles:
318 if lfile in modifiedfiles:
324 if not os.path.exists(
319 if not os.path.exists(
325 repo.wjoin(lfutil.standin(lfile))):
320 repo.wjoin(lfutil.standin(lfile))):
326 lfdirstate.drop(lfile)
321 lfdirstate.drop(lfile)
322
323 result = orig(text=text, user=user, date=date, match=match,
324 force=force, editor=editor, extra=extra)
325 # This needs to be after commit; otherwise precommit hooks
326 # get the wrong status
327 lfdirstate.write()
327 lfdirstate.write()
328
328 return result
329 return orig(text=text, user=user, date=date, match=match,
330 force=force, editor=editor, extra=extra)
331
329
332 for f in match.files():
330 for f in match.files():
333 if lfutil.isstandin(f):
331 if lfutil.isstandin(f):
334 raise util.Abort(
332 raise util.Abort(
335 _('file "%s" is a largefile standin') % f,
333 _('file "%s" is a largefile standin') % f,
336 hint=('commit the largefile itself instead'))
334 hint=('commit the largefile itself instead'))
337
335
338 # Case 2: user calls commit with specified patterns: refresh
336 # Case 2: user calls commit with specified patterns: refresh
339 # any matching big files.
337 # any matching big files.
340 smatcher = lfutil.composestandinmatcher(self, match)
338 smatcher = lfutil.composestandinmatcher(self, match)
341 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
339 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
342
340
343 # No matching big files: get out of the way and pass control to
341 # No matching big files: get out of the way and pass control to
344 # the usual commit() method.
342 # the usual commit() method.
345 if not standins:
343 if not standins:
346 return orig(text=text, user=user, date=date, match=match,
344 return orig(text=text, user=user, date=date, match=match,
347 force=force, editor=editor, extra=extra)
345 force=force, editor=editor, extra=extra)
348
346
349 # Refresh all matching big files. It's possible that the
347 # Refresh all matching big files. It's possible that the
350 # commit will end up failing, in which case the big files will
348 # commit will end up failing, in which case the big files will
351 # stay refreshed. No harm done: the user modified them and
349 # stay refreshed. No harm done: the user modified them and
352 # asked to commit them, so sooner or later we're going to
350 # asked to commit them, so sooner or later we're going to
353 # refresh the standins. Might as well leave them refreshed.
351 # refresh the standins. Might as well leave them refreshed.
354 lfdirstate = lfutil.openlfdirstate(ui, self)
352 lfdirstate = lfutil.openlfdirstate(ui, self)
355 for standin in standins:
353 for standin in standins:
356 lfile = lfutil.splitstandin(standin)
354 lfile = lfutil.splitstandin(standin)
357 if lfdirstate[lfile] <> 'r':
355 if lfdirstate[lfile] <> 'r':
358 lfutil.updatestandin(self, standin)
356 lfutil.updatestandin(self, standin)
359 lfdirstate.normal(lfile)
357 lfdirstate.normal(lfile)
360 else:
358 else:
361 lfdirstate.drop(lfile)
359 lfdirstate.drop(lfile)
362 lfdirstate.write()
363
360
364 # Cook up a new matcher that only matches regular files or
361 # Cook up a new matcher that only matches regular files or
365 # standins corresponding to the big files requested by the
362 # standins corresponding to the big files requested by the
366 # user. Have to modify _files to prevent commit() from
363 # user. Have to modify _files to prevent commit() from
367 # complaining "not tracked" for big files.
364 # complaining "not tracked" for big files.
368 lfiles = lfutil.listlfiles(repo)
365 lfiles = lfutil.listlfiles(repo)
369 match = copy.copy(match)
366 match = copy.copy(match)
370 orig_matchfn = match.matchfn
367 orig_matchfn = match.matchfn
371
368
372 # Check both the list of largefiles and the list of
369 # Check both the list of largefiles and the list of
373 # standins because if a largefile was removed, it
370 # standins because if a largefile was removed, it
374 # won't be in the list of largefiles at this point
371 # won't be in the list of largefiles at this point
375 match._files += sorted(standins)
372 match._files += sorted(standins)
376
373
377 actualfiles = []
374 actualfiles = []
378 for f in match._files:
375 for f in match._files:
379 fstandin = lfutil.standin(f)
376 fstandin = lfutil.standin(f)
380
377
381 # ignore known largefiles and standins
378 # ignore known largefiles and standins
382 if f in lfiles or fstandin in standins:
379 if f in lfiles or fstandin in standins:
383 continue
380 continue
384
381
385 # append directory separator to avoid collisions
382 # append directory separator to avoid collisions
386 if not fstandin.endswith(os.sep):
383 if not fstandin.endswith(os.sep):
387 fstandin += os.sep
384 fstandin += os.sep
388
385
389 # prevalidate matching standin directories
386 # prevalidate matching standin directories
390 if util.any(st for st in match._files
387 if util.any(st for st in match._files
391 if st.startswith(fstandin)):
388 if st.startswith(fstandin)):
392 continue
389 continue
393 actualfiles.append(f)
390 actualfiles.append(f)
394 match._files = actualfiles
391 match._files = actualfiles
395
392
396 def matchfn(f):
393 def matchfn(f):
397 if orig_matchfn(f):
394 if orig_matchfn(f):
398 return f not in lfiles
395 return f not in lfiles
399 else:
396 else:
400 return f in standins
397 return f in standins
401
398
402 match.matchfn = matchfn
399 match.matchfn = matchfn
403 return orig(text=text, user=user, date=date, match=match,
400 result = orig(text=text, user=user, date=date, match=match,
404 force=force, editor=editor, extra=extra)
401 force=force, editor=editor, extra=extra)
402 # This needs to be after commit; otherwise precommit hooks
403 # get the wrong status
404 lfdirstate.write()
405 return result
405 finally:
406 finally:
406 wlock.release()
407 wlock.release()
407
408
408 def push(self, remote, force=False, revs=None, newbranch=False):
409 def push(self, remote, force=False, revs=None, newbranch=False):
409 o = lfutil.findoutgoing(repo, remote, force)
410 o = lfutil.findoutgoing(repo, remote, force)
410 if o:
411 if o:
411 toupload = set()
412 toupload = set()
412 o = repo.changelog.nodesbetween(o, revs)[0]
413 o = repo.changelog.nodesbetween(o, revs)[0]
413 for n in o:
414 for n in o:
414 parents = [p for p in repo.changelog.parents(n)
415 parents = [p for p in repo.changelog.parents(n)
415 if p != node_.nullid]
416 if p != node_.nullid]
416 ctx = repo[n]
417 ctx = repo[n]
417 files = set(ctx.files())
418 files = set(ctx.files())
418 if len(parents) == 2:
419 if len(parents) == 2:
419 mc = ctx.manifest()
420 mc = ctx.manifest()
420 mp1 = ctx.parents()[0].manifest()
421 mp1 = ctx.parents()[0].manifest()
421 mp2 = ctx.parents()[1].manifest()
422 mp2 = ctx.parents()[1].manifest()
422 for f in mp1:
423 for f in mp1:
423 if f not in mc:
424 if f not in mc:
424 files.add(f)
425 files.add(f)
425 for f in mp2:
426 for f in mp2:
426 if f not in mc:
427 if f not in mc:
427 files.add(f)
428 files.add(f)
428 for f in mc:
429 for f in mc:
429 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
430 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
430 None):
431 None):
431 files.add(f)
432 files.add(f)
432
433
433 toupload = toupload.union(
434 toupload = toupload.union(
434 set([ctx[f].data().strip()
435 set([ctx[f].data().strip()
435 for f in files
436 for f in files
436 if lfutil.isstandin(f) and f in ctx]))
437 if lfutil.isstandin(f) and f in ctx]))
437 lfcommands.uploadlfiles(ui, self, remote, toupload)
438 lfcommands.uploadlfiles(ui, self, remote, toupload)
438 return super(lfiles_repo, self).push(remote, force, revs,
439 return super(lfiles_repo, self).push(remote, force, revs,
439 newbranch)
440 newbranch)
440
441
441 repo.__class__ = lfiles_repo
442 repo.__class__ = lfiles_repo
442
443
443 def checkrequireslfiles(ui, repo, **kwargs):
444 def checkrequireslfiles(ui, repo, **kwargs):
444 if 'largefiles' not in repo.requirements and util.any(
445 if 'largefiles' not in repo.requirements and util.any(
445 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
446 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
446 repo.requirements.add('largefiles')
447 repo.requirements.add('largefiles')
447 repo._writerequirements()
448 repo._writerequirements()
448
449
449 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
450 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
450 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
451 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,68 +1,67 b''
1 Test how largefiles abort in case the disk runs full
1 Test how largefiles abort in case the disk runs full
2
2
3 $ cat > criple.py <<EOF
3 $ cat > criple.py <<EOF
4 > import os, errno, shutil
4 > import os, errno, shutil
5 > from mercurial import util
5 > from mercurial import util
6 > #
6 > #
7 > # this makes the original largefiles code abort:
7 > # this makes the original largefiles code abort:
8 > def copyfileobj(fsrc, fdst, length=16*1024):
8 > def copyfileobj(fsrc, fdst, length=16*1024):
9 > fdst.write(fsrc.read(4))
9 > fdst.write(fsrc.read(4))
10 > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
10 > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
11 > shutil.copyfileobj = copyfileobj
11 > shutil.copyfileobj = copyfileobj
12 > #
12 > #
13 > # this makes the rewritten code abort:
13 > # this makes the rewritten code abort:
14 > def filechunkiter(f, size=65536, limit=None):
14 > def filechunkiter(f, size=65536, limit=None):
15 > yield f.read(4)
15 > yield f.read(4)
16 > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
16 > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
17 > util.filechunkiter = filechunkiter
17 > util.filechunkiter = filechunkiter
18 > #
18 > #
19 > def oslink(src, dest):
19 > def oslink(src, dest):
20 > raise OSError("no hardlinks, try copying instead")
20 > raise OSError("no hardlinks, try copying instead")
21 > util.oslink = oslink
21 > util.oslink = oslink
22 > EOF
22 > EOF
23
23
24 $ echo "[extensions]" >> $HGRCPATH
24 $ echo "[extensions]" >> $HGRCPATH
25 $ echo "largefiles =" >> $HGRCPATH
25 $ echo "largefiles =" >> $HGRCPATH
26
26
27 $ hg init alice
27 $ hg init alice
28 $ cd alice
28 $ cd alice
29 $ echo "this is a very big file" > big
29 $ echo "this is a very big file" > big
30 $ hg add --large big
30 $ hg add --large big
31 $ hg commit --config extensions.criple=$TESTTMP/criple.py -m big
31 $ hg commit --config extensions.criple=$TESTTMP/criple.py -m big
32 abort: No space left on device
32 abort: No space left on device
33 [255]
33 [255]
34
34
35 The largefile is not created in .hg/largefiles:
35 The largefile is not created in .hg/largefiles:
36
36
37 $ ls .hg/largefiles
37 $ ls .hg/largefiles
38 dirstate
38 dirstate
39
39
40 The user cache is not even created:
40 The user cache is not even created:
41
41
42 >>> import os; os.path.exists("$HOME/.cache/largefiles/")
42 >>> import os; os.path.exists("$HOME/.cache/largefiles/")
43 False
43 False
44
44
45 Make the commit with space on the device:
45 Make the commit with space on the device:
46
46
47 $ hg commit -m big
47 $ hg commit -m big
48
48
49 Now make a clone with a full disk, and make sure lfutil.link function
49 Now make a clone with a full disk, and make sure lfutil.link function
50 makes copies instead of hardlinks:
50 makes copies instead of hardlinks:
51
51
52 $ cd ..
52 $ cd ..
53 $ hg --config extensions.criple=$TESTTMP/criple.py clone --pull alice bob
53 $ hg --config extensions.criple=$TESTTMP/criple.py clone --pull alice bob
54 requesting all changes
54 requesting all changes
55 adding changesets
55 adding changesets
56 adding manifests
56 adding manifests
57 adding file changes
57 adding file changes
58 added 1 changesets with 1 changes to 1 files
58 added 1 changesets with 1 changes to 1 files
59 updating to branch default
59 updating to branch default
60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 getting changed largefiles
61 getting changed largefiles
62 abort: No space left on device
62 abort: No space left on device
63 [255]
63 [255]
64
64
65 The largefile is not created in .hg/largefiles:
65 The largefile is not created in .hg/largefiles:
66
66
67 $ ls bob/.hg/largefiles
67 $ ls bob/.hg/largefiles
68 dirstate
General Comments 0
You need to be logged in to leave comments. Login now