##// END OF EJS Templates
largefiles: only cache largefiles in new heads...
Na'Tosha Bard -
r16103:3e1efb45 stable
parent child Browse files
Show More
@@ -1,451 +1,459 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16 import tempfile
16 import tempfile
17
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 shortname = '.hglf'
21 shortname = '.hglf'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
26
26
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability wrapper: walk *dirstate* restricted by *matcher*,
    with no subrepo recursion.'''
    return dirstate.walk(matcher, [], unknown, ignored)
29
29
def repo_add(repo, list):
    '''Add the given files to the working context of *repo*.'''
    return repo[None].add(list)
33
33
def repo_remove(repo, list, unlink=False):
    '''Forget the given files in the working context; when *unlink* is
    true also delete them from the working directory.  A file that is
    already missing on disk is not an error.

    The original wrapped the body in an inner function that was called
    exactly once; that indirection is removed.  The work is done under
    the repo's wlock.'''
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError as inst:
                    # already gone is fine; anything else is a real error
                    if inst.errno != errno.ENOENT:
                        raise
        repo[None].forget(list)
    finally:
        wlock.release()
49
49
def repo_forget(repo, list):
    '''Forget the given files in the working context without removing
    them from disk.'''
    return repo[None].forget(list)
53
53
def findoutgoing(repo, remote, force):
    '''Return the changesets present in *repo* but missing from *remote*.'''
    # imported here (as in the original) to avoid a module-load cycle
    from mercurial import discovery
    common = discovery.findcommonincoming(repo, remote, force=force)[0]
    return repo.changelog.findmissing(common)
59
59
60 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
61
61
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which a file is treated as
    a largefile.  The explicit *opt* value wins; otherwise, when
    *assumelfiles* is set, fall back to the configured
    [largefiles] minsize (with *default*).  Aborts when no usable size
    can be determined or the value is not numeric.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
75
75
def link(src, dest):
    '''Hardlink *src* to *dest*; when hardlinking fails, fall back to an
    atomic copy that preserves the source's mode bits.'''
    try:
        util.oslink(src, dest)
        return
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        pass
    dst = util.atomictempfile(dest)
    for chunk in util.filechunkiter(open(src, 'rb')):
        dst.write(chunk)
    dst.close()
    os.chmod(dest, os.stat(src).st_mode)
86
86
def usercachepath(ui, hash):
    '''Return the path of *hash* inside the per-user largefile cache.

    An explicitly configured [largefiles] usercache wins; otherwise a
    platform-appropriate default location is used.  Returns None when
    the relevant environment variables are unset, and aborts on an
    unrecognized operating system.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
        return None
    if platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname, hash)
        return None
    if os.name == 'posix':
        # honour XDG_CACHE_HOME first, then fall back to ~/.cache
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
        return None
    raise util.Abort(_('unknown operating system: %s\n') % os.name)
112
112
def inusercache(ui, hash):
    '''True-ish when *hash* is present in the per-user cache (falsy when
    no cache path is available or the file does not exist).'''
    path = usercachepath(ui, hash)
    return path and os.path.exists(path)
116
116
def findfile(repo, hash):
    '''Locate the largefile with the given hash.

    Prefers the repo-local store; falls back to the per-user cache, in
    which case the file is first linked into the store.  Returns the
    store path, or None when the file is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
128
128
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass used for the largefiles dirstate: every
    incoming path is normalized with unixpath() before delegating to
    the base class, so callers may pass OS-native paths.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))

    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefiles_dirstate, self).normallookup(unixpath(f))
144
144
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the on-disk content matches the
                # recorded hash; a missing largefile is not an error here
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
172
172
def lfdirstate_status(lfdirstate, repo, rev):
    '''Run status on the largefiles dirstate and resolve the 'unsure'
    entries by comparing each working-copy largefile's hash against its
    standin at *rev*.  Returns the usual 7-tuple (modified, added,
    removed, missing, unknown, ignored, clean).'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        standinhash = repo[rev][standin(lfile)].data().strip()
        if standinhash != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
185
185
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
197
197
def instore(repo, hash):
    '''True when the largefile with this hash is in the repo-local store.'''
    return os.path.exists(storepath(repo, hash))
200
200
def storepath(repo, hash):
    '''Path of *hash* inside the repo-local largefiles store.'''
    return repo.join(os.path.join(longname, hash))
203
203
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen: this
    is meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
218
218
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the working-copy largefile *file* into the store unless the
    content named by its standin is already present.  *rev* and
    *uploaded* are accepted for interface compatibility but unused.'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
224
224
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that still exist in the revision's manifest
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
233
233
234
234
def copytostoreabsolute(repo, file, hash):
    '''Store the content of *file* (an absolute path) under *hash*:
    hardlink from the user cache when the content is there, otherwise
    copy atomically and preserve the mode bits; finally make sure the
    user cache is linked to the stored file as well.'''
    storefile = storepath(repo, hash)
    util.makedirs(os.path.dirname(storefile))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storefile)
    else:
        dst = util.atomictempfile(storefile)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        util.copymode(file, storefile)
    linktousercache(repo, hash)
246
246
def linktousercache(repo, hash):
    '''Hardlink the stored largefile into the per-user cache; a no-op
    when no user cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if path:
        util.makedirs(os.path.dirname(path))
        link(storepath(repo, hash), path)
252
252
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    Fix: the original used mutable default arguments (pats=[], opts={});
    they are replaced with None sentinels, which is backward compatible
    for every caller.'''
    pats = pats or []
    opts = opts or {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
273
273
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    Fix: mutable default arguments (pats=[], opts={}) replaced with
    None sentinels; behavior for all existing callers is unchanged.'''
    match = scmutil.match(repo[None], pats or [], opts or {})
    if not showbad:
        match.bad = lambda f, msg: None
    return match
283
283
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.

    Fix: the original bound the standin matcher's matchfn to a local
    named `isstandin`, shadowing the module-level isstandin() function;
    renamed to avoid confusion.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    standinmatchfn = smatcher.matchfn
    def composed_matchfn(f):
        # f must both be a standin and match the underlying file pattern
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn
    return smatcher
295
295
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + util.pconvert(filename)
307
307
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')
312
312
def splitstandin(filename):
    '''Return the largefile path a standin refers to, or None when
    *filename* is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
322
322
def updatestandin(repo, standin):
    '''Refresh *standin* from the current working-copy content of the
    largefile it tracks; a no-op when the largefile is not on disk.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
329
329
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()
334
334
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
338
338
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()
    return hasher.digest()
354
354
def hashrepofile(repo, file):
    '''Hex SHA-1 of the working-copy file *file* inside *repo*.'''
    return hashfile(repo.wjoin(file))
357
357
def hashfile(file):
    '''Return the hex SHA-1 of *file*'s content, or '' when the file
    does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()
367
367
class limitreader(object):
    '''File-like wrapper exposing at most *limit* bytes of *f*.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # the underlying file is deliberately left open
        pass
382
382
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    data = infile.read(blocksize)
    while data:
        yield data
        data = infile.read(blocksize)
    # same blecch as copyandhash() above
    infile.close()
392
392
def writehash(hash, filename, executable):
    '''Write *hash* followed by a newline to *filename*, creating parent
    directories as needed and setting mode bits per *executable*.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
397
397
def getexecutable(filename):
    '''True-ish when *filename* is executable by user, group and other.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
403
403
def getmode(executable):
    '''Mode bits for a largefile: 0755 when executable, else 0644.'''
    return 0o755 if executable else 0o644
409
409
def urljoin(first, second, *arg):
    '''Join URL components so exactly one slash separates each pair.'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
422
422
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
430
430
def httpsendfile(ui, filename):
    '''Wrap *filename* (opened binary) for transmission over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
433
433
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))
437
437
def islfilesrepo(repo):
    '''True when *repo* has the largefiles requirement and its store
    actually contains at least one standin.'''
    return ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
441
441
def mkstemp(repo, prefix):
    '''Returns a file descriptor and a filename corresponding to a temporary
    file in the repo's largefiles store.'''
    path = repo.join(longname)
    util.makedirs(path)
    return tempfile.mkstemp(prefix=prefix, dir=path)
448
448
class storeprotonotcapable(Exception):
    '''Raised when no store implementation handles any of the requested
    store types; carries the list of types that were tried.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
452
def getcurrentheads(repo):
    '''Return a list of all current branch heads of *repo*: the heads of
    every branch in the branch map, concatenated.

    Fix: the original rebuilt the accumulator with `heads = heads +
    newheads` on every iteration (accidentally quadratic); use extend.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
@@ -1,964 +1,964 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge
15 node, archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
def installnormalfilesmatchfn(manifest):
    '''overrides scmutil.match so that the matcher it returns will ignore all
    largefiles'''
    oldmatch = None # for the closure
    def override_match(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        basematch = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(basematch)
        def notlfile(f):
            # neither a standin itself nor a file tracked via a standin
            return not (lfutil.isstandin(f) or lfutil.standin(f) in manifest)
        m._files = [f for f in m._files if notlfile(f)]
        m._fmap = set(m._files)
        orig_matchfn = m.matchfn
        m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
        return m
    oldmatch = installmatchfn(override_match)
41
41
def installmatchfn(f):
    '''Replace scmutil.match with f, stashing the previous function on
    f.oldmatch so restorematchfn can undo the override.  Returns the
    previous match function.'''
    previous = scmutil.match
    f.oldmatch = previous
    scmutil.match = f
    return previous
47
47
def restorematchfn():
    '''restores scmutil.match to what it was before installnormalfilesmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installnormalfilesmatchfn will require n calls to
    restore matchfn to reverse'''
    current = scmutil.match
    # an overridden matchfn carries the previous one on .oldmatch
    scmutil.match = getattr(current, 'oldmatch', current)
55
55
def add_largefiles(ui, repo, *pats, **opts):
    '''scan the files matched by pats and decide which ones should be
    added as largefiles; write an empty standin for each of them and
    add the standins to the repository.  Returns the list of files
    that could not be added.'''
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    matcher = scmutil.match(repo[None], pats, opts)
    matcher.bad = lambda x, y: None  # silence "file not found" complaints
    wctx = repo[None]
    for f in repo.walk(matcher):
        exact = matcher.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if exact or not exists:
            abovemin = (lfsize and
                os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % matcher.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinfile = lfutil.standin(f)
                lfutil.writestandin(repo, standinfile, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinfile)
                # a file previously scheduled for removal is revived in
                # place; anything else is a brand-new add
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in lfutil.repo_add(repo, standins)
                    if f in matcher.files()]
    finally:
        wlock.release()
    return bad
117
117
def remove_largefiles(ui, repo, *pats, **opts):
    '''remove the largefiles matched by pats from the working directory
    and the lfdirstate, and forget/remove their standins.

    With --after, only files already deleted from the working directory
    are removed; modified or added largefiles are never removed and a
    warning is printed for them instead.  Raises util.Abort when called
    with neither patterns nor --after.
    '''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files actually tracked as largefiles (i.e. with a standin)
    modified, added, deleted, clean = [[f for f in list
                                       if lfutil.standin(f) in manifest]
                                      for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, reason):
        for f in files:
            ui.warn(_('not removing %s: %s (use forget to undo)\n')
                    % (m.rel(f), reason))

    if after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('file still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('file is modified'))
        warn(added, _('file has been marked for add'))

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    # fix: interpolate outside _() so the message matches a
                    # translatable catalog entry (was _('removing %s\n' % f))
                    ui.status(_('removing %s\n') % f)
                if os.path.exists(repo.wjoin(f)):
                    util.unlinkpath(repo.wjoin(f))
            lfdirstate.remove(f)
        lfdirstate.write()
        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        lfutil.repo_forget(repo, forget)
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            lfutil.repo_remove(repo, remove, unlink=True)
    finally:
        wlock.release()
174
174
175 # -- Wrappers: modify existing commands --------------------------------
175 # -- Wrappers: modify existing commands --------------------------------
176
176
177 # Add works by going through the files that the user wanted to add and
177 # Add works by going through the files that the user wanted to add and
178 # checking if they should be added as largefiles. Then it makes a new
178 # checking if they should be added as largefiles. Then it makes a new
179 # matcher which matches only the normal files and runs the original
179 # matcher which matches only the normal files and runs the original
180 # version of add.
180 # version of add.
def override_add(orig, ui, repo, *pats, **opts):
    '''wrapper for hg add: divert matching files into largefiles first,
    then run the original add on the remaining normal files.  Returns 1
    if either phase reported a failure, 0 otherwise.'''
    normal = opts.pop('normal')
    if normal:
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)
    bad = add_largefiles(ui, repo, *pats, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()

    return 1 if (result == 1 or bad) else 0
193
193
def override_remove(orig, ui, repo, *pats, **opts):
    '''wrapper for hg remove: run the original remove against normal
    files only, then remove the matching largefiles.'''
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    remove_largefiles(ui, repo, *pats, **opts)
199
199
def override_status(orig, ui, repo, *pats, **opts):
    '''run the original status with repo.lfstatus enabled so largefiles
    are reported correctly; always reset the flag afterwards.'''
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
206
206
def override_log(orig, ui, repo, *pats, **opts):
    '''run the original log with repo.lfstatus enabled; always reset the
    flag afterwards.  Unlike override_status the result is discarded.'''
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
213
213
def override_verify(orig, ui, repo, *pats, **opts):
    '''wrapper for hg verify: optionally also verify largefiles
    (--large), all revisions of them (--lfa) or their contents (--lfc).'''
    large = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large:
        # keep the first non-zero exit code
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   contents)
    return result
223
223
224 # Override needs to refresh standins so that update's normal merge
224 # Override needs to refresh standins so that update's normal merge
225 # will go through properly. Then the other update hook (overriding repo.update)
225 # will go through properly. Then the other update hook (overriding repo.update)
226 # will get the new files. Filemerge is also overriden so that the merge
226 # will get the new files. Filemerge is also overriden so that the merge
227 # will merge standins correctly.
227 # will merge standins correctly.
# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overriden so that the merge
# will merge standins correctly.
def override_update(orig, ui, repo, *pats, **opts):
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    lfstatus = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
        False, False, False)
    (unsure, modified, added, removed, missing, unknown, ignored,
     clean) = lfstatus

    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            dirty = bool(modified)
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    dirty = True
                else:
                    # content unchanged: record it as clean
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if dirty:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
257
257
258 # Before starting the manifest merge, merge.updates will call
258 # Before starting the manifest merge, merge.updates will call
259 # _checkunknown to check if there are any files in the merged-in
259 # _checkunknown to check if there are any files in the merged-in
260 # changeset that collide with unknown files in the working copy.
260 # changeset that collide with unknown files in the working copy.
261 #
261 #
262 # The largefiles are seen as unknown, so this prevents us from merging
262 # The largefiles are seen as unknown, so this prevents us from merging
263 # in a file 'foo' if we already have a largefile with the same name.
263 # in a file 'foo' if we already have a largefile with the same name.
264 #
264 #
265 # The overridden function filters the unknown files by removing any
265 # The overridden function filters the unknown files by removing any
266 # largefiles. This makes the merge proceed and we can then handle this
266 # largefiles. This makes the merge proceed and we can then handle this
267 # case further in the overridden manifestmerge function below.
267 # case further in the overridden manifestmerge function below.
# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def override_checkunknown(origfn, wctx, mctx, folding):
    origunknown = wctx.unknown()
    # hide files that are really largefiles (their standin is tracked)
    wctx._unknown = [f for f in origunknown
                     if lfutil.standin(f) not in wctx]
    try:
        return origfn(wctx, mctx, folding)
    finally:
        wctx._unknown = origunknown
275
275
276 # The manifest merge handles conflicts on the manifest level. We want
276 # The manifest merge handles conflicts on the manifest level. We want
277 # to handle changes in largefile-ness of files at this level too.
277 # to handle changes in largefile-ness of files at this level too.
278 #
278 #
279 # The strategy is to run the original manifestmerge and then process
279 # The strategy is to run the original manifestmerge and then process
280 # the action list it outputs. There are two cases we need to deal with:
280 # the action list it outputs. There are two cases we need to deal with:
281 #
281 #
282 # 1. Normal file in p1, largefile in p2. Here the largefile is
282 # 1. Normal file in p1, largefile in p2. Here the largefile is
283 # detected via its standin file, which will enter the working copy
283 # detected via its standin file, which will enter the working copy
284 # with a "get" action. It is not "merge" since the standin is all
284 # with a "get" action. It is not "merge" since the standin is all
285 # Mercurial is concerned with at this level -- the link to the
285 # Mercurial is concerned with at this level -- the link to the
286 # existing normal file is not relevant here.
286 # existing normal file is not relevant here.
287 #
287 #
288 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
288 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
289 # since the largefile will be present in the working copy and
289 # since the largefile will be present in the working copy and
290 # different from the normal file in p2. Mercurial therefore
290 # different from the normal file in p2. Mercurial therefore
291 # triggers a merge action.
291 # triggers a merge action.
292 #
292 #
293 # In both cases, we prompt the user and emit new actions to either
293 # In both cases, we prompt the user and emit new actions to either
294 # remove the standin (if the normal file was kept) or to remove the
294 # remove the standin (if the normal file was kept) or to remove the
295 # normal file and get the standin (if the largefile was kept). The
295 # normal file and get the standin (if the largefile was kept). The
296 # default prompt answer is to use the largefile version since it was
296 # default prompt answer is to use the largefile version since it was
297 # presumably changed on purpose.
297 # presumably changed on purpose.
298 #
298 #
299 # Finally, the merge.applyupdates function will then take care of
299 # Finally, the merge.applyupdates function will then take care of
300 # writing the files into the working copy and lfcommands.updatelfiles
300 # writing the files into the working copy and lfcommands.updatelfiles
301 # will update the largefiles.
301 # will update the largefiles.
def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
    '''post-process the manifest merge's action list, prompting the user
    whenever a file changed largefile-ness between the two parents and
    emitting replacement actions for the chosen variant.'''
    actions = origfn(repo, p1, p2, pa, overwrite, partial)
    newactions = []

    for action in actions:
        if overwrite:
            # overwrite (clean update): keep actions untouched
            newactions.append(action)
            continue
        f, kind = action[:2]

        choices = (_('&Largefile'), _('&Normal file'))
        if kind == "g" and lfutil.splitstandin(f) in p1 and f in p2:
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = lfutil.splitstandin(f)
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                newactions.append((lfile, "r"))
                newactions.append((standin, "g", p2.flags(standin)))
            else:
                newactions.append((standin, "r"))
        elif kind == "m" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                newactions.append((lfile, "r"))
            else:
                newactions.append((standin, "r"))
                newactions.append((lfile, "g", p2.flags(lfile)))
        else:
            newactions.append(action)

    return newactions
341
341
342 # Override filemerge to prompt the user about how they wish to merge
342 # Override filemerge to prompt the user about how they wish to merge
343 # largefiles. This will handle identical edits, and copy/rename +
343 # largefiles. This will handle identical edits, and copy/rename +
344 # edit without prompting the user.
344 # edit without prompting the user.
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        # not a largefile: defer to the original filemerge
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)

    if not fcother.cmp(fcdest): # files identical?
        return None

    # backwards, use working dir parent as ancestor
    if fcancestor == fcother:
        fcancestor = fcdest.parents()[0]

    if orig != fcother.path():
        repo.ui.status(_('merging %s and %s to %s\n')
                       % (lfutil.splitstandin(orig),
                          lfutil.splitstandin(fcother.path()),
                          lfutil.splitstandin(fcdest.path())))
    else:
        repo.ui.status(_('merging %s\n')
                       % lfutil.splitstandin(fcdest.path()))

    # if only one side changed relative to the ancestor, take that side
    # without prompting
    if (fcancestor.path() != fcother.path() and
            fcother.data() == fcancestor.data()):
        return 0
    if (fcancestor.path() != fcdest.path() and
            fcdest.data() == fcancestor.data()):
        repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
        return 0

    if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                              'keep (l)ocal or take (o)ther?') %
                            lfutil.splitstandin(orig),
                            (_('&Local'), _('&Other')), 0) == 0:
        return 0
    repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
    return 0
384
384
385 # Copy first changes the matchers to match standins instead of
385 # Copy first changes the matchers to match standins instead of
386 # largefiles. Then it overrides util.copyfile in that function it
386 # largefiles. Then it overrides util.copyfile in that function it
387 # checks if the destination largefile already exists. It also keeps a
387 # checks if the destination largefile already exists. It also keeps a
388 # list of copied files so that the largefiles can be copied and the
388 # list of copied files so that the largefiles can be copied and the
389 # dirstate updated.
389 # dirstate updated.
390 def override_copy(orig, ui, repo, pats, opts, rename=False):
390 def override_copy(orig, ui, repo, pats, opts, rename=False):
391 # doesn't remove largefile on rename
391 # doesn't remove largefile on rename
392 if len(pats) < 2:
392 if len(pats) < 2:
393 # this isn't legal, let the original function deal with it
393 # this isn't legal, let the original function deal with it
394 return orig(ui, repo, pats, opts, rename)
394 return orig(ui, repo, pats, opts, rename)
395
395
396 def makestandin(relpath):
396 def makestandin(relpath):
397 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
397 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
398 return os.path.join(repo.wjoin(lfutil.standin(path)))
398 return os.path.join(repo.wjoin(lfutil.standin(path)))
399
399
400 fullpats = scmutil.expandpats(pats)
400 fullpats = scmutil.expandpats(pats)
401 dest = fullpats[-1]
401 dest = fullpats[-1]
402
402
403 if os.path.isdir(dest):
403 if os.path.isdir(dest):
404 if not os.path.isdir(makestandin(dest)):
404 if not os.path.isdir(makestandin(dest)):
405 os.makedirs(makestandin(dest))
405 os.makedirs(makestandin(dest))
406 # This could copy both lfiles and normal files in one command,
406 # This could copy both lfiles and normal files in one command,
407 # but we don't want to do that. First replace their matcher to
407 # but we don't want to do that. First replace their matcher to
408 # only match normal files and run it, then replace it to just
408 # only match normal files and run it, then replace it to just
409 # match largefiles and run it again.
409 # match largefiles and run it again.
410 nonormalfiles = False
410 nonormalfiles = False
411 nolfiles = False
411 nolfiles = False
412 try:
412 try:
413 try:
413 try:
414 installnormalfilesmatchfn(repo[None].manifest())
414 installnormalfilesmatchfn(repo[None].manifest())
415 result = orig(ui, repo, pats, opts, rename)
415 result = orig(ui, repo, pats, opts, rename)
416 except util.Abort, e:
416 except util.Abort, e:
417 if str(e) != 'no files to copy':
417 if str(e) != 'no files to copy':
418 raise e
418 raise e
419 else:
419 else:
420 nonormalfiles = True
420 nonormalfiles = True
421 result = 0
421 result = 0
422 finally:
422 finally:
423 restorematchfn()
423 restorematchfn()
424
424
425 # The first rename can cause our current working directory to be removed.
425 # The first rename can cause our current working directory to be removed.
426 # In that case there is nothing left to copy/rename so just quit.
426 # In that case there is nothing left to copy/rename so just quit.
427 try:
427 try:
428 repo.getcwd()
428 repo.getcwd()
429 except OSError:
429 except OSError:
430 return result
430 return result
431
431
432 try:
432 try:
433 try:
433 try:
434 # When we call orig below it creates the standins but we don't add them
434 # When we call orig below it creates the standins but we don't add them
435 # to the dir state until later so lock during that time.
435 # to the dir state until later so lock during that time.
436 wlock = repo.wlock()
436 wlock = repo.wlock()
437
437
438 manifest = repo[None].manifest()
438 manifest = repo[None].manifest()
439 oldmatch = None # for the closure
439 oldmatch = None # for the closure
440 def override_match(ctx, pats=[], opts={}, globbed=False,
440 def override_match(ctx, pats=[], opts={}, globbed=False,
441 default='relpath'):
441 default='relpath'):
442 newpats = []
442 newpats = []
443 # The patterns were previously mangled to add the standin
443 # The patterns were previously mangled to add the standin
444 # directory; we need to remove that now
444 # directory; we need to remove that now
445 for pat in pats:
445 for pat in pats:
446 if match_.patkind(pat) is None and lfutil.shortname in pat:
446 if match_.patkind(pat) is None and lfutil.shortname in pat:
447 newpats.append(pat.replace(lfutil.shortname, ''))
447 newpats.append(pat.replace(lfutil.shortname, ''))
448 else:
448 else:
449 newpats.append(pat)
449 newpats.append(pat)
450 match = oldmatch(ctx, newpats, opts, globbed, default)
450 match = oldmatch(ctx, newpats, opts, globbed, default)
451 m = copy.copy(match)
451 m = copy.copy(match)
452 lfile = lambda f: lfutil.standin(f) in manifest
452 lfile = lambda f: lfutil.standin(f) in manifest
453 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
453 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
454 m._fmap = set(m._files)
454 m._fmap = set(m._files)
455 orig_matchfn = m.matchfn
455 orig_matchfn = m.matchfn
456 m.matchfn = lambda f: (lfutil.isstandin(f) and
456 m.matchfn = lambda f: (lfutil.isstandin(f) and
457 lfile(lfutil.splitstandin(f)) and
457 lfile(lfutil.splitstandin(f)) and
458 orig_matchfn(lfutil.splitstandin(f)) or
458 orig_matchfn(lfutil.splitstandin(f)) or
459 None)
459 None)
460 return m
460 return m
461 oldmatch = installmatchfn(override_match)
461 oldmatch = installmatchfn(override_match)
462 listpats = []
462 listpats = []
463 for pat in pats:
463 for pat in pats:
464 if match_.patkind(pat) is not None:
464 if match_.patkind(pat) is not None:
465 listpats.append(pat)
465 listpats.append(pat)
466 else:
466 else:
467 listpats.append(makestandin(pat))
467 listpats.append(makestandin(pat))
468
468
469 try:
469 try:
470 origcopyfile = util.copyfile
470 origcopyfile = util.copyfile
471 copiedfiles = []
471 copiedfiles = []
472 def override_copyfile(src, dest):
472 def override_copyfile(src, dest):
473 if (lfutil.shortname in src and
473 if (lfutil.shortname in src and
474 dest.startswith(repo.wjoin(lfutil.shortname))):
474 dest.startswith(repo.wjoin(lfutil.shortname))):
475 destlfile = dest.replace(lfutil.shortname, '')
475 destlfile = dest.replace(lfutil.shortname, '')
476 if not opts['force'] and os.path.exists(destlfile):
476 if not opts['force'] and os.path.exists(destlfile):
477 raise IOError('',
477 raise IOError('',
478 _('destination largefile already exists'))
478 _('destination largefile already exists'))
479 copiedfiles.append((src, dest))
479 copiedfiles.append((src, dest))
480 origcopyfile(src, dest)
480 origcopyfile(src, dest)
481
481
482 util.copyfile = override_copyfile
482 util.copyfile = override_copyfile
483 result += orig(ui, repo, listpats, opts, rename)
483 result += orig(ui, repo, listpats, opts, rename)
484 finally:
484 finally:
485 util.copyfile = origcopyfile
485 util.copyfile = origcopyfile
486
486
487 lfdirstate = lfutil.openlfdirstate(ui, repo)
487 lfdirstate = lfutil.openlfdirstate(ui, repo)
488 for (src, dest) in copiedfiles:
488 for (src, dest) in copiedfiles:
489 if (lfutil.shortname in src and
489 if (lfutil.shortname in src and
490 dest.startswith(repo.wjoin(lfutil.shortname))):
490 dest.startswith(repo.wjoin(lfutil.shortname))):
491 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
491 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
492 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
492 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
493 destlfiledir = os.path.dirname(destlfile) or '.'
493 destlfiledir = os.path.dirname(destlfile) or '.'
494 if not os.path.isdir(destlfiledir):
494 if not os.path.isdir(destlfiledir):
495 os.makedirs(destlfiledir)
495 os.makedirs(destlfiledir)
496 if rename:
496 if rename:
497 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
497 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
498 lfdirstate.remove(srclfile)
498 lfdirstate.remove(srclfile)
499 else:
499 else:
500 util.copyfile(srclfile, destlfile)
500 util.copyfile(srclfile, destlfile)
501 lfdirstate.add(destlfile)
501 lfdirstate.add(destlfile)
502 lfdirstate.write()
502 lfdirstate.write()
503 except util.Abort, e:
503 except util.Abort, e:
504 if str(e) != 'no files to copy':
504 if str(e) != 'no files to copy':
505 raise e
505 raise e
506 else:
506 else:
507 nolfiles = True
507 nolfiles = True
508 finally:
508 finally:
509 restorematchfn()
509 restorematchfn()
510 wlock.release()
510 wlock.release()
511
511
512 if nolfiles and nonormalfiles:
512 if nolfiles and nonormalfiles:
513 raise util.Abort(_('no files to copy'))
513 raise util.Abort(_('no files to copy'))
514
514
515 return result
515 return result
516
516
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def override_revert(orig, ui, repo, *pats, **opts):
    '''Wrap `hg revert` so that only the largefiles actually matched are
    pulled down and restored, leaving unrelated largefiles untouched.'''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
        # Sync standins with working-copy largefile contents so the
        # original revert sees their true state.
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = repo[opts.get('rev')]
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # NOTE: the original tested the same membership twice
                    # ("in ctx or ... in ctx"); a single test is equivalent.
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if orig_matchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return orig_matchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(override_match)
            scmutil.match  # no-op reference kept from the original code
            matches = override_match(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        for lfile in modified:
            if lfile in lfileslist:
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Remove standins for largefiles that were added but are being
        # reverted away (not present in the target revision).
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
606
606
def hg_update(orig, repo, node):
    """Run the wrapped update, then bring largefiles in step with it."""
    status = orig(repo, node)
    lfcommands.updatelfiles(repo.ui, repo)
    return status
611
611
def hg_clean(orig, repo, node, show_stats=True):
    """Run the wrapped clean-update, then refresh working-copy largefiles."""
    outcome = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return outcome
616
616
def hg_merge(orig, repo, node, force=None, remind=True):
    """Run the wrapped merge with the repo flagged as mid-merge."""
    # Mark the repo as being in the middle of a merge, so that
    # updatelfiles() will know that it needs to trust the standins in
    # the working copy, not the standins in the current node.
    repo._ismerging = True
    try:
        outcome = orig(repo, node, force, remind)
        lfcommands.updatelfiles(repo.ui, repo)
    finally:
        repo._ismerging = False
    return outcome
628
628
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def override_pull(orig, ui, repo, source=None, **opts):
    '''Wrap `hg pull`.

    With --rebase, run the pull/rebase dance with the repo flagged so the
    largefiles code knows a rebase is in progress.  Otherwise do a normal
    pull and then pre-cache the largefiles of any newly pulled heads.
    '''
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            # Suppress the post-pull update; rebase handles the working copy.
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = 'default'
        oldheads = lfutil.getcurrentheads(repo)
        result = orig(ui, repo, source, **opts)
        # If we do not have the new largefiles for any new heads we pulled, we
        # will run into a problem later if we try to merge or rebase with one
        # of these heads, so cache the largefiles now directly into the system
        # cache.
        ui.status(_("caching new largefiles\n"))
        numcached = 0
        heads = lfutil.getcurrentheads(repo)
        newheads = set(heads).difference(set(oldheads))
        for head in newheads:
            (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
            numcached += len(cached)
        # Bug fix: apply % OUTSIDE _() so the constant format string is used
        # as the translation key (the original formatted inside _(), which
        # can never match a catalog entry).
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
678
678
def override_rebase(orig, ui, repo, **opts):
    """Run the wrapped rebase with the repo flagged as rebasing, so the
    largefiles machinery knows to leave the working copy to rebase."""
    repo._isrebasing = True
    try:
        orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False
685
685
686 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
686 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
687 prefix=None, mtime=None, subrepos=None):
687 prefix=None, mtime=None, subrepos=None):
688 # No need to lock because we are only reading history and
688 # No need to lock because we are only reading history and
689 # largefile caches, neither of which are modified.
689 # largefile caches, neither of which are modified.
690 lfcommands.cachelfiles(repo.ui, repo, node)
690 lfcommands.cachelfiles(repo.ui, repo, node)
691
691
692 if kind not in archival.archivers:
692 if kind not in archival.archivers:
693 raise util.Abort(_("unknown archive type '%s'") % kind)
693 raise util.Abort(_("unknown archive type '%s'") % kind)
694
694
695 ctx = repo[node]
695 ctx = repo[node]
696
696
697 if kind == 'files':
697 if kind == 'files':
698 if prefix:
698 if prefix:
699 raise util.Abort(
699 raise util.Abort(
700 _('cannot give prefix when archiving to files'))
700 _('cannot give prefix when archiving to files'))
701 else:
701 else:
702 prefix = archival.tidyprefix(dest, kind, prefix)
702 prefix = archival.tidyprefix(dest, kind, prefix)
703
703
704 def write(name, mode, islink, getdata):
704 def write(name, mode, islink, getdata):
705 if matchfn and not matchfn(name):
705 if matchfn and not matchfn(name):
706 return
706 return
707 data = getdata()
707 data = getdata()
708 if decode:
708 if decode:
709 data = repo.wwritedata(name, data)
709 data = repo.wwritedata(name, data)
710 archiver.addfile(prefix + name, mode, islink, data)
710 archiver.addfile(prefix + name, mode, islink, data)
711
711
712 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
712 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
713
713
714 if repo.ui.configbool("ui", "archivemeta", True):
714 if repo.ui.configbool("ui", "archivemeta", True):
715 def metadata():
715 def metadata():
716 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
716 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
717 hex(repo.changelog.node(0)), hex(node), ctx.branch())
717 hex(repo.changelog.node(0)), hex(node), ctx.branch())
718
718
719 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
719 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
720 if repo.tagtype(t) == 'global')
720 if repo.tagtype(t) == 'global')
721 if not tags:
721 if not tags:
722 repo.ui.pushbuffer()
722 repo.ui.pushbuffer()
723 opts = {'template': '{latesttag}\n{latesttagdistance}',
723 opts = {'template': '{latesttag}\n{latesttagdistance}',
724 'style': '', 'patch': None, 'git': None}
724 'style': '', 'patch': None, 'git': None}
725 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
725 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
726 ltags, dist = repo.ui.popbuffer().split('\n')
726 ltags, dist = repo.ui.popbuffer().split('\n')
727 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
727 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
728 tags += 'latesttagdistance: %s\n' % dist
728 tags += 'latesttagdistance: %s\n' % dist
729
729
730 return base + tags
730 return base + tags
731
731
732 write('.hg_archival.txt', 0644, False, metadata)
732 write('.hg_archival.txt', 0644, False, metadata)
733
733
734 for f in ctx:
734 for f in ctx:
735 ff = ctx.flags(f)
735 ff = ctx.flags(f)
736 getdata = ctx[f].data
736 getdata = ctx[f].data
737 if lfutil.isstandin(f):
737 if lfutil.isstandin(f):
738 path = lfutil.findfile(repo, getdata().strip())
738 path = lfutil.findfile(repo, getdata().strip())
739 if path is None:
739 if path is None:
740 raise util.Abort(
740 raise util.Abort(
741 _('largefile %s not found in repo store or system cache')
741 _('largefile %s not found in repo store or system cache')
742 % lfutil.splitstandin(f))
742 % lfutil.splitstandin(f))
743 f = lfutil.splitstandin(f)
743 f = lfutil.splitstandin(f)
744
744
745 def getdatafn():
745 def getdatafn():
746 fd = None
746 fd = None
747 try:
747 try:
748 fd = open(path, 'rb')
748 fd = open(path, 'rb')
749 return fd.read()
749 return fd.read()
750 finally:
750 finally:
751 if fd:
751 if fd:
752 fd.close()
752 fd.close()
753
753
754 getdata = getdatafn
754 getdata = getdatafn
755 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
755 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
756
756
757 if subrepos:
757 if subrepos:
758 for subpath in ctx.substate:
758 for subpath in ctx.substate:
759 sub = ctx.sub(subpath)
759 sub = ctx.sub(subpath)
760 sub.archive(repo.ui, archiver, prefix)
760 sub.archive(repo.ui, archiver, prefix)
761
761
762 archiver.done()
762 archiver.done()
763
763
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def override_bailifchanged(orig, repo):
    """Abort like the wrapped check, and additionally abort when any
    largefile is modified, added, removed, or deleted."""
    orig(repo)
    repo.lfstatus = True
    dirty = repo.status()[:4]
    repo.lfstatus = False
    if any(dirty):
        raise util.Abort(_('outstanding uncommitted changes'))
775
775
# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    """Refuse to fetch when normal files or largefiles are dirty."""
    repo.lfstatus = True
    dirty = repo.status()[:4]
    repo.lfstatus = False
    if any(dirty):
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
784
784
def override_forget(orig, ui, repo, *pats, **opts):
    '''Wrap `hg forget`: let the original handle normal files, then
    forget matched largefiles by dropping their standins.'''
    # Run the original forget over normal files only.
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        st = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # modified + added + deleted + clean largefiles that have a standin.
    forget = [f for f in sorted(st[0] + st[1] + st[3] + st[6])
              if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        standin = lfutil.standin(f)
        if standin not in repo.dirstate and not \
                os.path.isdir(m.rel(standin)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race inbetween.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
                           unlink=True)
    finally:
        wlock.release()
824
824
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    '''Return the set of standin files that would be pushed to *dest*,
    or None when the remote repository cannot be reached or nothing is
    outgoing.'''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # For merges, ctx.files() is incomplete: also consider files
            # that exist in only one parent or differ from either parent.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            files.update(f for f in mp1 if f not in mc)
            files.update(f for f in mp2 if f not in mc)
            files.update(f for f in mc
                         if mc[f] != mp1.get(f, None) or
                            mc[f] != mp2.get(f, None))
        toupload.update(f for f in files
                        if lfutil.isstandin(f) and f in ctx)
    return toupload
866
866
def override_outgoing(orig, ui, repo, dest=None, **opts):
    """Run the wrapped outgoing; with --large, also list the largefiles
    that would be uploaded."""
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for standin in toupload:
                ui.status(lfutil.splitstandin(standin) + '\n')
            ui.status('\n')
879
879
def override_summary(orig, ui, repo, *pats, **opts):
    """Run the wrapped summary with largefile status enabled; with --large,
    also report how many largefiles would be uploaded."""
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, None, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles: %d to upload\n') % len(toupload))
893
893
def override_addremove(orig, ui, repo, *pats, **opts):
    '''Wrap `hg addremove` to remove missing largefiles and add new ones
    before the original handles the remaining normal files.'''
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    (unsure, modified, added, removed, missing, unknown, ignored,
     clean) = lfdirstate.status(
        match_.always(repo.root, repo.getcwd()), [], False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if missing:
        repo._isaddremove = True
        remove_largefiles(ui, repo, *missing, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    add_largefiles(ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    return result
919
919
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def override_purge(orig, ui, repo, *dirs, **opts):
    '''Wrap the purge command so that tracked largefiles survive it.

    purge deletes everything repo.status() reports as unknown or ignored,
    and largefiles (whose standins, not the files themselves, are in the
    normal dirstate) would otherwise show up there.  We temporarily replace
    repo.status with a filtering version for the duration of the wrapped
    command.
    '''
    oldstatus = repo.status
    def override_status(node1='.', node2=None, match=None, ignored=False,
                        clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        # Drop any file the lfdirstate knows about ('?' means untracked
        # there) from the unknown/ignored lists so purge won't delete it.
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = override_status
    try:
        # Fix: restore repo.status even when the wrapped command raises
        # (the original left the repo object permanently monkey-patched on
        # error), and propagate the command's return value instead of
        # silently discarding it.
        result = orig(ui, repo, *dirs, **opts)
    finally:
        repo.status = oldstatus
    return result
936
936
def override_rollback(orig, ui, repo, **opts):
    '''Wrap rollback: after the transaction is rolled back, bring the
    standins in the working directory and the largefiles dirstate back in
    sync with the restored parent revision.'''
    result = orig(ui, repo, **opts)
    # Update only the standin files in the working directory to match the
    # new (rolled-back) parent; partial=lfutil.isstandin restricts the
    # merge to standins so normal files are left alone.
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Largefiles listed in the (restored) working copy vs. those in its
        # first parent before this call.
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for file in lfiles:
            if file in oldlfiles:
                # Existed before: force a re-check of its status against
                # the working copy on next dirstate use.
                lfdirstate.normallookup(file)
            else:
                # New in the rolled-back state: record it as added.
                lfdirstate.add(file)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
955
955
def override_transplant(orig, ui, repo, *revs, **opts):
    '''Wrap transplant: run it with repo._istransplanting set, then bring
    the working directory's largefiles up to date with the transplanted
    standins.'''
    repo._istransplanting = True
    try:
        result = orig(ui, repo, *revs, **opts)
        # Refresh every largefile from its standin; suppress the usual
        # progress output.
        lfcommands.updatelfiles(ui, repo, filelist=None,
                                printmessage=False)
    finally:
        # Always clear the flag, even if transplant or the update failed.
        repo._istransplanting = False
    return result
General Comments 0
You need to be logged in to leave comments. Login now