largefiles: move calculation of largefiles for updating to utility function
Na'Tosha Bard
r16245:a18ad914 default
@@ -1,459 +1,467 @@
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 '''largefiles utility code: must not import other modules in this package.'''
10
11 import os
12 import errno
13 import platform
14 import shutil
15 import stat
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
19
20 shortname = '.hglf'
21 longname = 'largefiles'
22
23
24 # -- Portability wrappers ----------------------------------------------
25
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27     return dirstate.walk(matcher, [], unknown, ignored)
28
29 def repo_add(repo, list):
30     add = repo[None].add
31     return add(list)
32
33 def repo_remove(repo, list, unlink=False):
34     def remove(list, unlink):
35         wlock = repo.wlock()
36         try:
37             if unlink:
38                 for f in list:
39                     try:
40                         util.unlinkpath(repo.wjoin(f))
41                     except OSError, inst:
42                         if inst.errno != errno.ENOENT:
43                             raise
44             repo[None].forget(list)
45         finally:
46             wlock.release()
47     return remove(list, unlink=unlink)
48
49 def repo_forget(repo, list):
50     forget = repo[None].forget
51     return forget(list)
52
53 def findoutgoing(repo, remote, force):
54     from mercurial import discovery
55     common, _anyinc, _heads = discovery.findcommonincoming(repo,
56         remote, force=force)
57     return repo.changelog.findmissing(common)
58
59 # -- Private worker functions ------------------------------------------
60
61 def getminsize(ui, assumelfiles, opt, default=10):
62     lfsize = opt
63     if not lfsize and assumelfiles:
64         lfsize = ui.config(longname, 'minsize', default=default)
65     if lfsize:
66         try:
67             lfsize = float(lfsize)
68         except ValueError:
69             raise util.Abort(_('largefiles: size must be number (not %s)\n')
70                              % lfsize)
71     if lfsize is None:
72         raise util.Abort(_('minimum size for largefiles must be specified'))
73     return lfsize
74
75 def link(src, dest):
76     try:
77         util.oslink(src, dest)
78     except OSError:
79         # if hardlinks fail, fallback on atomic copy
80         dst = util.atomictempfile(dest)
81         for chunk in util.filechunkiter(open(src, 'rb')):
82             dst.write(chunk)
83         dst.close()
84         os.chmod(dest, os.stat(src).st_mode)
85
86 def usercachepath(ui, hash):
87     path = ui.configpath(longname, 'usercache', None)
88     if path:
89         path = os.path.join(path, hash)
90     else:
91         if os.name == 'nt':
92             appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
93             if appdata:
94                 path = os.path.join(appdata, longname, hash)
95         elif platform.system() == 'Darwin':
96             home = os.getenv('HOME')
97             if home:
98                 path = os.path.join(home, 'Library', 'Caches',
99                                     longname, hash)
100         elif os.name == 'posix':
101             path = os.getenv('XDG_CACHE_HOME')
102             if path:
103                 path = os.path.join(path, longname, hash)
104             else:
105                 home = os.getenv('HOME')
106                 if home:
107                     path = os.path.join(home, '.cache', longname, hash)
108         else:
109             raise util.Abort(_('unknown operating system: %s\n') % os.name)
110     return path
111
112 def inusercache(ui, hash):
113     path = usercachepath(ui, hash)
114     return path and os.path.exists(path)
115
116 def findfile(repo, hash):
117     if instore(repo, hash):
118         repo.ui.note(_('Found %s in store\n') % hash)
119         return storepath(repo, hash)
120     elif inusercache(repo.ui, hash):
121         repo.ui.note(_('Found %s in system cache\n') % hash)
122         path = storepath(repo, hash)
123         util.makedirs(os.path.dirname(path))
124         link(usercachepath(repo.ui, hash), path)
125         return path
126     return None
127
128 class largefiles_dirstate(dirstate.dirstate):
129     def __getitem__(self, key):
130         return super(largefiles_dirstate, self).__getitem__(unixpath(key))
131     def normal(self, f):
132         return super(largefiles_dirstate, self).normal(unixpath(f))
133     def remove(self, f):
134         return super(largefiles_dirstate, self).remove(unixpath(f))
135     def add(self, f):
136         return super(largefiles_dirstate, self).add(unixpath(f))
137     def drop(self, f):
138         return super(largefiles_dirstate, self).drop(unixpath(f))
139     def forget(self, f):
140         return super(largefiles_dirstate, self).forget(unixpath(f))
141     def normallookup(self, f):
142         return super(largefiles_dirstate, self).normallookup(unixpath(f))
143
144 def openlfdirstate(ui, repo):
145     '''
146     Return a dirstate object that tracks largefiles: i.e. its root is
147     the repo root, but it is saved in .hg/largefiles/dirstate.
148     '''
149     admin = repo.join(longname)
150     opener = scmutil.opener(admin)
151     lfdirstate = largefiles_dirstate(opener, ui, repo.root,
152                                      repo.dirstate._validate)
153
154     # If the largefiles dirstate does not exist, populate and create
155     # it. This ensures that we create it on the first meaningful
156     # largefiles operation in a new clone.
157     if not os.path.exists(os.path.join(admin, 'dirstate')):
158         util.makedirs(admin)
159         matcher = getstandinmatcher(repo)
160         for standin in dirstate_walk(repo.dirstate, matcher):
161             lfile = splitstandin(standin)
162             hash = readstandin(repo, lfile)
163             lfdirstate.normallookup(lfile)
164             try:
165                 if hash == hashfile(repo.wjoin(lfile)):
166                     lfdirstate.normal(lfile)
167             except OSError, err:
168                 if err.errno != errno.ENOENT:
169                     raise
170     return lfdirstate
171
172 def lfdirstate_status(lfdirstate, repo, rev):
173     match = match_.always(repo.root, repo.getcwd())
174     s = lfdirstate.status(match, [], False, False, False)
175     unsure, modified, added, removed, missing, unknown, ignored, clean = s
176     for lfile in unsure:
177         if repo[rev][standin(lfile)].data().strip() != \
178                 hashfile(repo.wjoin(lfile)):
179             modified.append(lfile)
180         else:
181             clean.append(lfile)
182             lfdirstate.normal(lfile)
183     return (modified, added, removed, missing, unknown, ignored, clean)
184
185 def listlfiles(repo, rev=None, matcher=None):
186     '''return a list of largefiles in the working copy or the
187     specified changeset'''
188
189     if matcher is None:
190         matcher = getstandinmatcher(repo)
191
192     # ignore unknown files in working directory
193     return [splitstandin(f)
194             for f in repo[rev].walk(matcher)
195             if rev is not None or repo.dirstate[f] != '?']
196
197 def instore(repo, hash):
198     return os.path.exists(storepath(repo, hash))
199
200 def storepath(repo, hash):
201     return repo.join(os.path.join(longname, hash))
202
203 def copyfromcache(repo, hash, filename):
204     '''Copy the specified largefile from the repo or system cache to
205     filename in the repository. Return true on success or false if the
206     file was not found in either cache (which should not happen:
207     this is meant to be called only after ensuring that the needed
208     largefile exists in the cache).'''
209     path = findfile(repo, hash)
210     if path is None:
211         return False
212     util.makedirs(os.path.dirname(repo.wjoin(filename)))
213     # The write may fail before the file is fully written, but we
214     # don't use atomic writes in the working copy.
215     shutil.copy(path, repo.wjoin(filename))
216     return True
217
218 def copytostore(repo, rev, file, uploaded=False):
219     hash = readstandin(repo, file)
220     if instore(repo, hash):
221         return
222     copytostoreabsolute(repo, repo.wjoin(file), hash)
223
224 def copyalltostore(repo, node):
225     '''Copy all largefiles in a given revision to the store'''
226
227     ctx = repo[node]
228     for filename in ctx.files():
229         if isstandin(filename) and filename in ctx.manifest():
230             realfile = splitstandin(filename)
231             copytostore(repo, ctx.node(), realfile)
232
233
234 def copytostoreabsolute(repo, file, hash):
235     util.makedirs(os.path.dirname(storepath(repo, hash)))
236     if inusercache(repo.ui, hash):
237         link(usercachepath(repo.ui, hash), storepath(repo, hash))
238     else:
239         dst = util.atomictempfile(storepath(repo, hash),
240                                   createmode=repo.store.createmode)
241         for chunk in util.filechunkiter(open(file, 'rb')):
242             dst.write(chunk)
243         dst.close()
244         linktousercache(repo, hash)
245
246 def linktousercache(repo, hash):
247     path = usercachepath(repo.ui, hash)
248     if path:
249         util.makedirs(os.path.dirname(path))
250         link(storepath(repo, hash), path)
251
252 def getstandinmatcher(repo, pats=[], opts={}):
253     '''Return a match object that applies pats to the standin directory'''
254     standindir = repo.pathto(shortname)
255     if pats:
256         # patterns supplied: search standin directory relative to current dir
257         cwd = repo.getcwd()
258         if os.path.isabs(cwd):
259             # cwd is an absolute path for hg -R <reponame>
260             # work relative to the repository root in this case
261             cwd = ''
262         pats = [os.path.join(standindir, cwd, pat) for pat in pats]
263     elif os.path.isdir(standindir):
264         # no patterns: relative to repo root
265         pats = [standindir]
266     else:
267         # no patterns and no standin dir: return matcher that matches nothing
268         match = match_.match(repo.root, None, [], exact=True)
269         match.matchfn = lambda f: False
270         return match
271     return getmatcher(repo, pats, opts, showbad=False)
272
273 def getmatcher(repo, pats=[], opts={}, showbad=True):
274     '''Wrapper around scmutil.match() that adds showbad: if false,
275     neuter the match object's bad() method so it does not print any
276     warnings about missing files or directories.'''
277     match = scmutil.match(repo[None], pats, opts)
278
279     if not showbad:
280         match.bad = lambda f, msg: None
281     return match
282
283 def composestandinmatcher(repo, rmatcher):
284     '''Return a matcher that accepts standins corresponding to the
285     files accepted by rmatcher. Pass the list of files in the matcher
286     as the paths specified by the user.'''
287     smatcher = getstandinmatcher(repo, rmatcher.files())
288     isstandin = smatcher.matchfn
289     def composed_matchfn(f):
290         return isstandin(f) and rmatcher.matchfn(splitstandin(f))
291     smatcher.matchfn = composed_matchfn
292
293     return smatcher
294
295 def standin(filename):
296     '''Return the repo-relative path to the standin for the specified big
297     file.'''
298     # Notes:
299     # 1) Most callers want an absolute path, but _create_standin() needs
300     #    it repo-relative so lfadd() can pass it to repo_add(). So leave
301     #    it up to the caller to use repo.wjoin() to get an absolute path.
302     # 2) Join with '/' because that's what dirstate always uses, even on
303     #    Windows. Change existing separator to '/' first in case we are
304     #    passed filenames from an external source (like the command line).
305     return shortname + '/' + util.pconvert(filename)
306
307 def isstandin(filename):
308     '''Return true if filename is a big file standin. filename must be
309     in Mercurial's internal form (slash-separated).'''
310     return filename.startswith(shortname + '/')
311
312 def splitstandin(filename):
313     # Split on / because that's what dirstate always uses, even on Windows.
314     # Change local separator to / first just in case we are passed filenames
315     # from an external source (like the command line).
316     bits = util.pconvert(filename).split('/', 1)
317     if len(bits) == 2 and bits[0] == shortname:
318         return bits[1]
319     else:
320         return None
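A quick sketch of how the three path helpers above fit together (illustrative only, not part of the changeset; it assumes the module is importable as lfutil and uses a made-up file name): standin() maps a largefile path into the .hglf tree, isstandin() recognizes such a path, and splitstandin() inverts the mapping or returns None for anything else.

    >>> lfutil.standin('data/big.bin')
    '.hglf/data/big.bin'
    >>> lfutil.isstandin('.hglf/data/big.bin')
    True
    >>> lfutil.splitstandin('.hglf/data/big.bin')
    'data/big.bin'
    >>> lfutil.splitstandin('data/big.bin') is None
    True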
321
322 def updatestandin(repo, standin):
323     file = repo.wjoin(splitstandin(standin))
324     if os.path.exists(file):
325         hash = hashfile(file)
326         executable = getexecutable(file)
327         writestandin(repo, standin, hash, executable)
328
329 def readstandin(repo, filename, node=None):
330     '''read hex hash from standin for filename at given node, or working
331     directory if no node is given'''
332     return repo[node][standin(filename)].data().strip()
333
334 def writestandin(repo, standin, hash, executable):
335     '''write hash to <repo.root>/<standin>'''
336     writehash(hash, repo.wjoin(standin), executable)
337
338 def copyandhash(instream, outfile):
339     '''Read bytes from instream (iterable) and write them to outfile,
340     computing the SHA-1 hash of the data along the way. Close outfile
341     when done and return the binary hash.'''
342     hasher = util.sha1('')
343     for data in instream:
344         hasher.update(data)
345         outfile.write(data)
346
347     # Blecch: closing a file that somebody else opened is rude and
348     # wrong. But it's so darn convenient and practical! After all,
349     # outfile was opened just to copy and hash.
350     outfile.close()
351
352     return hasher.digest()
353
354 def hashrepofile(repo, file):
355     return hashfile(repo.wjoin(file))
356
357 def hashfile(file):
358     if not os.path.exists(file):
359         return ''
360     hasher = util.sha1('')
361     fd = open(file, 'rb')
362     for data in blockstream(fd):
363         hasher.update(data)
364     fd.close()
365     return hasher.hexdigest()
366
367 class limitreader(object):
368     def __init__(self, f, limit):
369         self.f = f
370         self.limit = limit
371
372     def read(self, length):
373         if self.limit == 0:
374             return ''
375         length = length > self.limit and self.limit or length
376         self.limit -= length
377         return self.f.read(length)
378
379     def close(self):
380         pass
381
382 def blockstream(infile, blocksize=128 * 1024):
383     """Generator that yields blocks of data from infile and closes infile."""
384     while True:
385         data = infile.read(blocksize)
386         if not data:
387             break
388         yield data
389     # same blecch as copyandhash() above
390     infile.close()
391
392 def writehash(hash, filename, executable):
393     util.makedirs(os.path.dirname(filename))
394     util.writefile(filename, hash + '\n')
395     os.chmod(filename, getmode(executable))
396
397 def getexecutable(filename):
398     mode = os.stat(filename).st_mode
399     return ((mode & stat.S_IXUSR) and
400             (mode & stat.S_IXGRP) and
401             (mode & stat.S_IXOTH))
402
403 def getmode(executable):
404     if executable:
405         return 0755
406     else:
407         return 0644
408
409 def urljoin(first, second, *arg):
410     def join(left, right):
411         if not left.endswith('/'):
412             left += '/'
413         if right.startswith('/'):
414             right = right[1:]
415         return left + right
416
417     url = join(first, second)
418     for a in arg:
419         url = join(url, a)
420     return url
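For reference, a small interactive sketch of urljoin() above (illustrative only; the URL is made up): each segment is joined with exactly one slash, whether or not the pieces already carry one.

    >>> lfutil.urljoin('http://example.com/largefiles', 'store/', '/ab/cdef')
    'http://example.com/largefiles/store/ab/cdef'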
421
422 def hexsha1(data):
423     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
424     object data"""
425     h = util.sha1()
426     for chunk in util.filechunkiter(data):
427         h.update(chunk)
428     return h.hexdigest()
429
430 def httpsendfile(ui, filename):
431     return httpconnection.httpsendfile(ui, filename, 'rb')
432
433 def unixpath(path):
434     '''Return a version of path normalized for use with the lfdirstate.'''
435     return util.pconvert(os.path.normpath(path))
436
437 def islfilesrepo(repo):
438     return ('largefiles' in repo.requirements and
439             util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
440
441 class storeprotonotcapable(Exception):
442     def __init__(self, storetypes):
443         self.storetypes = storetypes
444
445 def getcurrentheads(repo):
446     branches = repo.branchmap()
447     heads = []
448     for branch in branches:
449         newheads = repo.branchheads(branch)
450         heads = heads + newheads
451     return heads
452
453 def getstandinsstate(repo):
454     standins = []
455     matcher = getstandinmatcher(repo)
456     for standin in dirstate_walk(repo.dirstate, matcher):
457         lfile = splitstandin(standin)
458         standins.append((lfile, readstandin(repo, lfile)))
459     return standins
460
461 def getlfilestoupdate(oldstandins, newstandins):
462     changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
463     filelist = []
464     for f in changedstandins:
465         if f[0] not in filelist:
466             filelist.append(f[0])
467     return filelist
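To show how the new helper is meant to be used (a sketch with made-up file names and shortened hashes, not part of the changeset): callers snapshot the standin state with getstandinsstate() before and after an operation and hand both lists to getlfilestoupdate(), which returns every largefile whose (name, hash) pair differs between the two snapshots, including files present in only one of them.

    oldstandins = [('big.bin', 'aaa...'), ('video.mov', 'bbb...')]
    newstandins = [('big.bin', 'ccc...'), ('video.mov', 'bbb...'), ('image.png', 'ddd...')]
    getlfilestoupdate(oldstandins, newstandins)
    # -> ['big.bin', 'image.png'] (built from a set, so the order is not guaranteed)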
@@ -1,973 +1,965 @@
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
11 import os
12 import copy
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15     node, archival, error, merge
16 from mercurial.i18n import _
17 from mercurial.node import hex
18 from hgext import rebase
19
20 import lfutil
21 import lfcommands
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
25 def installnormalfilesmatchfn(manifest):
26     '''overrides scmutil.match so that the matcher it returns will ignore all
27     largefiles'''
28     oldmatch = None # for the closure
29     def override_match(ctx, pats=[], opts={}, globbed=False,
30             default='relpath'):
31         match = oldmatch(ctx, pats, opts, globbed, default)
32         m = copy.copy(match)
33         notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34                 manifest)
35         m._files = filter(notlfile, m._files)
36         m._fmap = set(m._files)
37         orig_matchfn = m.matchfn
38         m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39         return m
40     oldmatch = installmatchfn(override_match)
41
42 def installmatchfn(f):
43     oldmatch = scmutil.match
44     setattr(f, 'oldmatch', oldmatch)
45     scmutil.match = f
46     return oldmatch
47
48 def restorematchfn():
49     '''restores scmutil.match to what it was before installnormalfilesmatchfn
50     was called. no-op if scmutil.match is its original function.
51
52     Note that n calls to installnormalfilesmatchfn will require n calls to
53     restore matchfn to reverse'''
54     scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55
56 def add_largefiles(ui, repo, *pats, **opts):
57     large = opts.pop('large', None)
58     lfsize = lfutil.getminsize(
59         ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60
61     lfmatcher = None
62     if lfutil.islfilesrepo(repo):
63         lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64         if lfpats:
65             lfmatcher = match_.match(repo.root, '', list(lfpats))
66
67     lfnames = []
68     m = scmutil.match(repo[None], pats, opts)
69     m.bad = lambda x, y: None
70     wctx = repo[None]
71     for f in repo.walk(m):
72         exact = m.exact(f)
73         lfile = lfutil.standin(f) in wctx
74         nfile = f in wctx
75         exists = lfile or nfile
76
77         # Don't warn the user when they attempt to add a normal tracked file.
78         # The normal add code will do that for us.
79         if exact and exists:
80             if lfile:
81                 ui.warn(_('%s already a largefile\n') % f)
82             continue
83
84         if exact or not exists:
85             abovemin = (lfsize and
86                         os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87             if large or abovemin or (lfmatcher and lfmatcher(f)):
88                 lfnames.append(f)
89                 if ui.verbose or not exact:
90                     ui.status(_('adding %s as a largefile\n') % m.rel(f))
91
92     bad = []
93     standins = []
94
95     # Need to lock, otherwise there could be a race condition between
96     # when standins are created and added to the repo.
97     wlock = repo.wlock()
98     try:
99         if not opts.get('dry_run'):
100             lfdirstate = lfutil.openlfdirstate(ui, repo)
101             for f in lfnames:
102                 standinname = lfutil.standin(f)
103                 lfutil.writestandin(repo, standinname, hash='',
104                     executable=lfutil.getexecutable(repo.wjoin(f)))
105                 standins.append(standinname)
106                 if lfdirstate[f] == 'r':
107                     lfdirstate.normallookup(f)
108                 else:
109                     lfdirstate.add(f)
110             lfdirstate.write()
111             bad += [lfutil.splitstandin(f)
112                     for f in lfutil.repo_add(repo, standins)
113                     if f in m.files()]
114     finally:
115         wlock.release()
116     return bad
117
118 def remove_largefiles(ui, repo, *pats, **opts):
119     after = opts.get('after')
120     if not pats and not after:
121         raise util.Abort(_('no files specified'))
122     m = scmutil.match(repo[None], pats, opts)
123     try:
124         repo.lfstatus = True
125         s = repo.status(match=m, clean=True)
126     finally:
127         repo.lfstatus = False
128     manifest = repo[None].manifest()
129     modified, added, deleted, clean = [[f for f in list
130                                         if lfutil.standin(f) in manifest]
131                                        for list in [s[0], s[1], s[3], s[6]]]
132
133     def warn(files, reason):
134         for f in files:
135             ui.warn(_('not removing %s: %s (use forget to undo)\n')
136                     % (m.rel(f), reason))
137
138     if after:
139         remove, forget = deleted, []
140         warn(modified + added + clean, _('file still exists'))
141     else:
142         remove, forget = deleted + clean, []
143         warn(modified, _('file is modified'))
144         warn(added, _('file has been marked for add'))
145
146     for f in sorted(remove + forget):
147         if ui.verbose or not m.exact(f):
148             ui.status(_('removing %s\n') % m.rel(f))
149
150     # Need to lock because standin files are deleted then removed from the
151     # repository and we could race in between.
152     wlock = repo.wlock()
153     try:
154         lfdirstate = lfutil.openlfdirstate(ui, repo)
155         for f in remove:
156             if not after:
157                 # If this is being called by addremove, notify the user that we
158                 # are removing the file.
159                 if getattr(repo, "_isaddremove", False):
160                     ui.status(_('removing %s\n') % f)
161                 if os.path.exists(repo.wjoin(f)):
162                     util.unlinkpath(repo.wjoin(f))
163             lfdirstate.remove(f)
164         lfdirstate.write()
165         forget = [lfutil.standin(f) for f in forget]
166         remove = [lfutil.standin(f) for f in remove]
167         lfutil.repo_forget(repo, forget)
168         # If this is being called by addremove, let the original addremove
169         # function handle this.
170         if not getattr(repo, "_isaddremove", False):
171             lfutil.repo_remove(repo, remove, unlink=True)
172     finally:
173         wlock.release()
174
175 # -- Wrappers: modify existing commands --------------------------------
176
177 # Add works by going through the files that the user wanted to add and
178 # checking if they should be added as largefiles. Then it makes a new
179 # matcher which matches only the normal files and runs the original
180 # version of add.
181 def override_add(orig, ui, repo, *pats, **opts):
182     normal = opts.pop('normal')
183     if normal:
184         if opts.get('large'):
185             raise util.Abort(_('--normal cannot be used with --large'))
186         return orig(ui, repo, *pats, **opts)
187     bad = add_largefiles(ui, repo, *pats, **opts)
188     installnormalfilesmatchfn(repo[None].manifest())
189     result = orig(ui, repo, *pats, **opts)
190     restorematchfn()
191
192     return (result == 1 or bad) and 1 or 0
193
194 def override_remove(orig, ui, repo, *pats, **opts):
195     installnormalfilesmatchfn(repo[None].manifest())
196     orig(ui, repo, *pats, **opts)
197     restorematchfn()
198     remove_largefiles(ui, repo, *pats, **opts)
199
200 def override_status(orig, ui, repo, *pats, **opts):
201     try:
202         repo.lfstatus = True
203         return orig(ui, repo, *pats, **opts)
204     finally:
205         repo.lfstatus = False
206
207 def override_log(orig, ui, repo, *pats, **opts):
208     try:
209         repo.lfstatus = True
210         orig(ui, repo, *pats, **opts)
211     finally:
212         repo.lfstatus = False
213
214 def override_verify(orig, ui, repo, *pats, **opts):
215     large = opts.pop('large', False)
216     all = opts.pop('lfa', False)
217     contents = opts.pop('lfc', False)
218
219     result = orig(ui, repo, *pats, **opts)
220     if large:
221         result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222     return result
223
224 # Override needs to refresh standins so that update's normal merge
225 # will go through properly. Then the other update hook (overriding repo.update)
226 # will get the new files. Filemerge is also overridden so that the merge
227 # will merge standins correctly.
228 def override_update(orig, ui, repo, *pats, **opts):
229     lfdirstate = lfutil.openlfdirstate(ui, repo)
230     s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231                           False, False)
232     (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233
234     # Need to lock between the standins getting updated and their
235     # largefiles getting updated
236     wlock = repo.wlock()
237     try:
238         if opts['check']:
239             mod = len(modified) > 0
240             for lfile in unsure:
241                 standin = lfutil.standin(lfile)
242                 if repo['.'][standin].data().strip() != \
243                         lfutil.hashfile(repo.wjoin(lfile)):
244                     mod = True
245                 else:
246                     lfdirstate.normal(lfile)
247             lfdirstate.write()
248             if mod:
249                 raise util.Abort(_('uncommitted local changes'))
250         # XXX handle removed differently
251         if not opts['clean']:
252             for lfile in unsure + modified + added:
253                 lfutil.updatestandin(repo, lfutil.standin(lfile))
254     finally:
255         wlock.release()
256     return orig(ui, repo, *pats, **opts)
257
258 # Before starting the manifest merge, merge.updates will call
259 # _checkunknown to check if there are any files in the merged-in
260 # changeset that collide with unknown files in the working copy.
261 #
262 # The largefiles are seen as unknown, so this prevents us from merging
263 # in a file 'foo' if we already have a largefile with the same name.
264 #
265 # The overridden function filters the unknown files by removing any
266 # largefiles. This makes the merge proceed and we can then handle this
267 # case further in the overridden manifestmerge function below.
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
269     if lfutil.standin(f) in wctx:
270         return False
271     return origfn(repo, wctx, mctx, f)
272
273 # The manifest merge handles conflicts on the manifest level. We want
274 # to handle changes in largefile-ness of files at this level too.
275 #
276 # The strategy is to run the original manifestmerge and then process
277 # the action list it outputs. There are two cases we need to deal with:
278 #
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 #    detected via its standin file, which will enter the working copy
281 #    with a "get" action. It is not "merge" since the standin is all
282 #    Mercurial is concerned with at this level -- the link to the
283 #    existing normal file is not relevant here.
284 #
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 #    since the largefile will be present in the working copy and
287 #    different from the normal file in p2. Mercurial therefore
288 #    triggers a merge action.
289 #
290 # In both cases, we prompt the user and emit new actions to either
291 # remove the standin (if the normal file was kept) or to remove the
292 # normal file and get the standin (if the largefile was kept). The
293 # default prompt answer is to use the largefile version since it was
294 # presumably changed on purpose.
295 #
296 # Finally, the merge.applyupdates function will then take care of
297 # writing the files into the working copy and lfcommands.updatelfiles
298 # will update the largefiles.
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300     actions = origfn(repo, p1, p2, pa, overwrite, partial)
301     processed = []
302
303     for action in actions:
304         if overwrite:
305             processed.append(action)
306             continue
307         f, m = action[:2]
308
309         choices = (_('&Largefile'), _('&Normal file'))
310         if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311             # Case 1: normal file in the working copy, largefile in
312             # the second parent
313             lfile = lfutil.splitstandin(f)
314             standin = f
315             msg = _('%s has been turned into a largefile\n'
316                     'use (l)argefile or keep as (n)ormal file?') % lfile
317             if repo.ui.promptchoice(msg, choices, 0) == 0:
318                 processed.append((lfile, "r"))
319                 processed.append((standin, "g", p2.flags(standin)))
320             else:
321                 processed.append((standin, "r"))
322         elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323             # Case 2: largefile in the working copy, normal file in
324             # the second parent
325             standin = lfutil.standin(f)
326             lfile = f
327             msg = _('%s has been turned into a normal file\n'
328                     'keep as (l)argefile or use (n)ormal file?') % lfile
329             if repo.ui.promptchoice(msg, choices, 0) == 0:
330                 processed.append((lfile, "r"))
331             else:
332                 processed.append((standin, "r"))
333                 processed.append((lfile, "g", p2.flags(lfile)))
334         else:
335             processed.append(action)
336
337     return processed
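To make case 1 above concrete (hypothetical file name, sketched from the code rather than taken from this changeset): suppose the original manifestmerge emits a "get" action for the standin .hglf/photo.raw coming from the second parent while the first parent holds a normal photo.raw. Answering (l)argefile at the prompt rewrites that single action into a remove of the normal file plus a get of the standin; answering (n)ormal file turns it into a remove of the standin instead.

    # emitted by the original manifestmerge:
    ('.hglf/photo.raw', 'g', flags)
    # rewritten when the largefile is kept:
    ('photo.raw', 'r')
    ('.hglf/photo.raw', 'g', p2.flags('.hglf/photo.raw'))
    # rewritten when the normal file is kept:
    ('.hglf/photo.raw', 'r')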
338
338
339 # Override filemerge to prompt the user about how they wish to merge
339 # Override filemerge to prompt the user about how they wish to merge
340 # largefiles. This will handle identical edits, and copy/rename +
340 # largefiles. This will handle identical edits, and copy/rename +
341 # edit without prompting the user.
341 # edit without prompting the user.
342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
343 # Use better variable names here. Because this is a wrapper we cannot
343 # Use better variable names here. Because this is a wrapper we cannot
344 # change the variable names in the function declaration.
344 # change the variable names in the function declaration.
345 fcdest, fcother, fcancestor = fcd, fco, fca
345 fcdest, fcother, fcancestor = fcd, fco, fca
346 if not lfutil.isstandin(orig):
346 if not lfutil.isstandin(orig):
347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
348 else:
348 else:
349 if not fcother.cmp(fcdest): # files identical?
349 if not fcother.cmp(fcdest): # files identical?
350 return None
350 return None
351
351
352 # backwards, use working dir parent as ancestor
352 # backwards, use working dir parent as ancestor
353 if fcancestor == fcother:
353 if fcancestor == fcother:
354 fcancestor = fcdest.parents()[0]
354 fcancestor = fcdest.parents()[0]
355
355
356 if orig != fcother.path():
356 if orig != fcother.path():
357 repo.ui.status(_('merging %s and %s to %s\n')
357 repo.ui.status(_('merging %s and %s to %s\n')
358 % (lfutil.splitstandin(orig),
358 % (lfutil.splitstandin(orig),
359 lfutil.splitstandin(fcother.path()),
359 lfutil.splitstandin(fcother.path()),
360 lfutil.splitstandin(fcdest.path())))
360 lfutil.splitstandin(fcdest.path())))
361 else:
361 else:
362 repo.ui.status(_('merging %s\n')
362 repo.ui.status(_('merging %s\n')
363 % lfutil.splitstandin(fcdest.path()))
363 % lfutil.splitstandin(fcdest.path()))
364
364
365 if fcancestor.path() != fcother.path() and fcother.data() == \
365 if fcancestor.path() != fcother.path() and fcother.data() == \
366 fcancestor.data():
366 fcancestor.data():
367 return 0
367 return 0
368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
369 fcancestor.data():
369 fcancestor.data():
370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
371 return 0
371 return 0
372
372
373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
374 'keep (l)ocal or take (o)ther?') %
374 'keep (l)ocal or take (o)ther?') %
375 lfutil.splitstandin(orig),
375 lfutil.splitstandin(orig),
376 (_('&Local'), _('&Other')), 0) == 0:
376 (_('&Local'), _('&Other')), 0) == 0:
377 return 0
377 return 0
378 else:
378 else:
379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
380 return 0
380 return 0
381
381
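The merge decision in override_filemerge comes down to comparing the standin contents of the two sides against the common ancestor and prompting only when both sides changed the largefile. A minimal sketch of that decision, with hypothetical names (resolve_standin) and the path checks omitted; ui.promptchoice is the same Mercurial ui call used above:

def resolve_standin(ui, lfile, dest_data, other_data, ancestor_data):
    # identical contents on both sides: nothing to merge
    if other_data == dest_data:
        return 'local'
    # only the local side changed the largefile: keep it
    if other_data == ancestor_data:
        return 'local'
    # only the other side changed the largefile: take it
    if dest_data == ancestor_data:
        return 'other'
    # both sides changed it: ask the user, defaulting to local
    answer = ui.promptchoice('largefile %s has a merge conflict\n'
                             'keep (l)ocal or take (o)ther?' % lfile,
                             ('&Local', '&Other'), 0)
    return 'local' if answer == 0 else 'other'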
382 # Copy first changes the matchers to match standins instead of
382 # Copy first changes the matchers to match standins instead of
383 # largefiles. Then it overrides util.copyfile so that it checks whether
383 # largefiles. Then it overrides util.copyfile so that it checks whether
384 # the destination largefile already exists. It also keeps a
384 # the destination largefile already exists. It also keeps a
385 # list of copied files so that the largefiles can be copied and the
385 # list of copied files so that the largefiles can be copied and the
386 # dirstate updated.
386 # dirstate updated.
387 def override_copy(orig, ui, repo, pats, opts, rename=False):
387 def override_copy(orig, ui, repo, pats, opts, rename=False):
388 # doesn't remove largefile on rename
388 # doesn't remove largefile on rename
389 if len(pats) < 2:
389 if len(pats) < 2:
390 # this isn't legal, let the original function deal with it
390 # this isn't legal, let the original function deal with it
391 return orig(ui, repo, pats, opts, rename)
391 return orig(ui, repo, pats, opts, rename)
392
392
393 def makestandin(relpath):
393 def makestandin(relpath):
394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
395 return os.path.join(repo.wjoin(lfutil.standin(path)))
395 return os.path.join(repo.wjoin(lfutil.standin(path)))
396
396
397 fullpats = scmutil.expandpats(pats)
397 fullpats = scmutil.expandpats(pats)
398 dest = fullpats[-1]
398 dest = fullpats[-1]
399
399
400 if os.path.isdir(dest):
400 if os.path.isdir(dest):
401 if not os.path.isdir(makestandin(dest)):
401 if not os.path.isdir(makestandin(dest)):
402 os.makedirs(makestandin(dest))
402 os.makedirs(makestandin(dest))
403 # This could copy both lfiles and normal files in one command,
403 # This could copy both lfiles and normal files in one command,
404 # but we don't want to do that. First replace their matcher to
404 # but we don't want to do that. First replace their matcher to
405 # only match normal files and run it, then replace it to just
405 # only match normal files and run it, then replace it to just
406 # match largefiles and run it again.
406 # match largefiles and run it again.
407 nonormalfiles = False
407 nonormalfiles = False
408 nolfiles = False
408 nolfiles = False
409 try:
409 try:
410 try:
410 try:
411 installnormalfilesmatchfn(repo[None].manifest())
411 installnormalfilesmatchfn(repo[None].manifest())
412 result = orig(ui, repo, pats, opts, rename)
412 result = orig(ui, repo, pats, opts, rename)
413 except util.Abort, e:
413 except util.Abort, e:
414 if str(e) != 'no files to copy':
414 if str(e) != 'no files to copy':
415 raise e
415 raise e
416 else:
416 else:
417 nonormalfiles = True
417 nonormalfiles = True
418 result = 0
418 result = 0
419 finally:
419 finally:
420 restorematchfn()
420 restorematchfn()
421
421
422 # The first rename can cause our current working directory to be removed.
422 # The first rename can cause our current working directory to be removed.
423 # In that case there is nothing left to copy/rename so just quit.
423 # In that case there is nothing left to copy/rename so just quit.
424 try:
424 try:
425 repo.getcwd()
425 repo.getcwd()
426 except OSError:
426 except OSError:
427 return result
427 return result
428
428
429 try:
429 try:
430 try:
430 try:
431 # When we call orig below it creates the standins but we don't add them
431 # When we call orig below it creates the standins but we don't add them
432 # to the dir state until later so lock during that time.
432 # to the dir state until later so lock during that time.
433 wlock = repo.wlock()
433 wlock = repo.wlock()
434
434
435 manifest = repo[None].manifest()
435 manifest = repo[None].manifest()
436 oldmatch = None # for the closure
436 oldmatch = None # for the closure
437 def override_match(ctx, pats=[], opts={}, globbed=False,
437 def override_match(ctx, pats=[], opts={}, globbed=False,
438 default='relpath'):
438 default='relpath'):
439 newpats = []
439 newpats = []
440 # The patterns were previously mangled to add the standin
440 # The patterns were previously mangled to add the standin
441 # directory; we need to remove that now
441 # directory; we need to remove that now
442 for pat in pats:
442 for pat in pats:
443 if match_.patkind(pat) is None and lfutil.shortname in pat:
443 if match_.patkind(pat) is None and lfutil.shortname in pat:
444 newpats.append(pat.replace(lfutil.shortname, ''))
444 newpats.append(pat.replace(lfutil.shortname, ''))
445 else:
445 else:
446 newpats.append(pat)
446 newpats.append(pat)
447 match = oldmatch(ctx, newpats, opts, globbed, default)
447 match = oldmatch(ctx, newpats, opts, globbed, default)
448 m = copy.copy(match)
448 m = copy.copy(match)
449 lfile = lambda f: lfutil.standin(f) in manifest
449 lfile = lambda f: lfutil.standin(f) in manifest
450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
451 m._fmap = set(m._files)
451 m._fmap = set(m._files)
452 orig_matchfn = m.matchfn
452 orig_matchfn = m.matchfn
453 m.matchfn = lambda f: (lfutil.isstandin(f) and
453 m.matchfn = lambda f: (lfutil.isstandin(f) and
454 (f in manifest) and
454 (f in manifest) and
455 orig_matchfn(lfutil.splitstandin(f)) or
455 orig_matchfn(lfutil.splitstandin(f)) or
456 None)
456 None)
457 return m
457 return m
458 oldmatch = installmatchfn(override_match)
458 oldmatch = installmatchfn(override_match)
459 listpats = []
459 listpats = []
460 for pat in pats:
460 for pat in pats:
461 if match_.patkind(pat) is not None:
461 if match_.patkind(pat) is not None:
462 listpats.append(pat)
462 listpats.append(pat)
463 else:
463 else:
464 listpats.append(makestandin(pat))
464 listpats.append(makestandin(pat))
465
465
466 try:
466 try:
467 origcopyfile = util.copyfile
467 origcopyfile = util.copyfile
468 copiedfiles = []
468 copiedfiles = []
469 def override_copyfile(src, dest):
469 def override_copyfile(src, dest):
470 if (lfutil.shortname in src and
470 if (lfutil.shortname in src and
471 dest.startswith(repo.wjoin(lfutil.shortname))):
471 dest.startswith(repo.wjoin(lfutil.shortname))):
472 destlfile = dest.replace(lfutil.shortname, '')
472 destlfile = dest.replace(lfutil.shortname, '')
473 if not opts['force'] and os.path.exists(destlfile):
473 if not opts['force'] and os.path.exists(destlfile):
474 raise IOError('',
474 raise IOError('',
475 _('destination largefile already exists'))
475 _('destination largefile already exists'))
476 copiedfiles.append((src, dest))
476 copiedfiles.append((src, dest))
477 origcopyfile(src, dest)
477 origcopyfile(src, dest)
478
478
479 util.copyfile = override_copyfile
479 util.copyfile = override_copyfile
480 result += orig(ui, repo, listpats, opts, rename)
480 result += orig(ui, repo, listpats, opts, rename)
481 finally:
481 finally:
482 util.copyfile = origcopyfile
482 util.copyfile = origcopyfile
483
483
484 lfdirstate = lfutil.openlfdirstate(ui, repo)
484 lfdirstate = lfutil.openlfdirstate(ui, repo)
485 for (src, dest) in copiedfiles:
485 for (src, dest) in copiedfiles:
486 if (lfutil.shortname in src and
486 if (lfutil.shortname in src and
487 dest.startswith(repo.wjoin(lfutil.shortname))):
487 dest.startswith(repo.wjoin(lfutil.shortname))):
488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
490 destlfiledir = os.path.dirname(destlfile) or '.'
490 destlfiledir = os.path.dirname(destlfile) or '.'
491 if not os.path.isdir(destlfiledir):
491 if not os.path.isdir(destlfiledir):
492 os.makedirs(destlfiledir)
492 os.makedirs(destlfiledir)
493 if rename:
493 if rename:
494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
495 lfdirstate.remove(srclfile)
495 lfdirstate.remove(srclfile)
496 else:
496 else:
497 util.copyfile(srclfile, destlfile)
497 util.copyfile(srclfile, destlfile)
498 lfdirstate.add(destlfile)
498 lfdirstate.add(destlfile)
499 lfdirstate.write()
499 lfdirstate.write()
500 except util.Abort, e:
500 except util.Abort, e:
501 if str(e) != 'no files to copy':
501 if str(e) != 'no files to copy':
502 raise e
502 raise e
503 else:
503 else:
504 nolfiles = True
504 nolfiles = True
505 finally:
505 finally:
506 restorematchfn()
506 restorematchfn()
507 wlock.release()
507 wlock.release()
508
508
509 if nolfiles and nonormalfiles:
509 if nolfiles and nonormalfiles:
510 raise util.Abort(_('no files to copy'))
510 raise util.Abort(_('no files to copy'))
511
511
512 return result
512 return result
513
513
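override_copy temporarily swaps out util.copyfile so every copy into the standin directory can be checked and recorded, then restores the original in a finally block. A stripped-down sketch of that pattern, using shutil.copyfile as a stand-in for Mercurial's util.copyfile and with illustrative names:

import shutil

def run_with_copy_hook(operation, copied):
    origcopyfile = shutil.copyfile
    def hooked(src, dest):
        copied.append((src, dest))      # remember every copy that happened
        return origcopyfile(src, dest)
    shutil.copyfile = hooked
    try:
        return operation()              # anything that calls shutil.copyfile
    finally:
        shutil.copyfile = origcopyfile  # always restore the original

The recorded (src, dest) pairs play the same role as copiedfiles above: they tell the wrapper afterwards which largefiles to copy or rename and which lfdirstate entries to update.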
514 # When the user calls revert, we have to be careful to not revert any
514 # When the user calls revert, we have to be careful to not revert any
515 # changes to other largefiles accidentally. This means we have to keep
515 # changes to other largefiles accidentally. This means we have to keep
516 # track of the largefiles that are being reverted so we only pull down
516 # track of the largefiles that are being reverted so we only pull down
517 # the necessary largefiles.
517 # the necessary largefiles.
518 #
518 #
519 # Standins are only updated (to match the hash of largefiles) before
519 # Standins are only updated (to match the hash of largefiles) before
520 # commits. Update the standins then run the original revert, changing
520 # commits. Update the standins then run the original revert, changing
521 # the matcher to hit standins instead of largefiles. Based on the
521 # the matcher to hit standins instead of largefiles. Based on the
522 # resulting standins update the largefiles. Then return the standins
522 # resulting standins update the largefiles. Then return the standins
523 # to their proper state
523 # to their proper state
524 def override_revert(orig, ui, repo, *pats, **opts):
524 def override_revert(orig, ui, repo, *pats, **opts):
525 # Because we put the standins in a bad state (by updating them)
525 # Because we put the standins in a bad state (by updating them)
526 # and then return them to a correct state we need to lock to
526 # and then return them to a correct state we need to lock to
527 # prevent others from changing them in their incorrect state.
527 # prevent others from changing them in their incorrect state.
528 wlock = repo.wlock()
528 wlock = repo.wlock()
529 try:
529 try:
530 lfdirstate = lfutil.openlfdirstate(ui, repo)
530 lfdirstate = lfutil.openlfdirstate(ui, repo)
531 (modified, added, removed, missing, unknown, ignored, clean) = \
531 (modified, added, removed, missing, unknown, ignored, clean) = \
532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
533 for lfile in modified:
533 for lfile in modified:
534 lfutil.updatestandin(repo, lfutil.standin(lfile))
534 lfutil.updatestandin(repo, lfutil.standin(lfile))
535 for lfile in missing:
535 for lfile in missing:
536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
537
537
538 try:
538 try:
539 ctx = repo[opts.get('rev')]
539 ctx = repo[opts.get('rev')]
540 oldmatch = None # for the closure
540 oldmatch = None # for the closure
541 def override_match(ctx, pats=[], opts={}, globbed=False,
541 def override_match(ctx, pats=[], opts={}, globbed=False,
542 default='relpath'):
542 default='relpath'):
543 match = oldmatch(ctx, pats, opts, globbed, default)
543 match = oldmatch(ctx, pats, opts, globbed, default)
544 m = copy.copy(match)
544 m = copy.copy(match)
545 def tostandin(f):
545 def tostandin(f):
546 if lfutil.standin(f) in ctx:
546 if lfutil.standin(f) in ctx:
547 return lfutil.standin(f)
547 return lfutil.standin(f)
548 elif lfutil.standin(f) in repo[None]:
548 elif lfutil.standin(f) in repo[None]:
549 return None
549 return None
550 return f
550 return f
551 m._files = [tostandin(f) for f in m._files]
551 m._files = [tostandin(f) for f in m._files]
552 m._files = [f for f in m._files if f is not None]
552 m._files = [f for f in m._files if f is not None]
553 m._fmap = set(m._files)
553 m._fmap = set(m._files)
554 orig_matchfn = m.matchfn
554 orig_matchfn = m.matchfn
555 def matchfn(f):
555 def matchfn(f):
556 if lfutil.isstandin(f):
556 if lfutil.isstandin(f):
557 # We need to keep track of what largefiles are being
557 # We need to keep track of what largefiles are being
558 # matched so we know which ones to update later --
558 # matched so we know which ones to update later --
559 # otherwise we accidentally revert changes to other
559 # otherwise we accidentally revert changes to other
560 # largefiles. This is repo-specific, so duckpunch the
560 # largefiles. This is repo-specific, so duckpunch the
561 # repo object to keep the list of largefiles for us
561 # repo object to keep the list of largefiles for us
562 # later.
562 # later.
563 if orig_matchfn(lfutil.splitstandin(f)) and \
563 if orig_matchfn(lfutil.splitstandin(f)) and \
564 (f in repo[None] or f in ctx):
564 (f in repo[None] or f in ctx):
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 lfileslist.append(lfutil.splitstandin(f))
566 lfileslist.append(lfutil.splitstandin(f))
567 repo._lfilestoupdate = lfileslist
567 repo._lfilestoupdate = lfileslist
568 return True
568 return True
569 else:
569 else:
570 return False
570 return False
571 return orig_matchfn(f)
571 return orig_matchfn(f)
572 m.matchfn = matchfn
572 m.matchfn = matchfn
573 return m
573 return m
574 oldmatch = installmatchfn(override_match)
574 oldmatch = installmatchfn(override_match)
575 scmutil.match
575 scmutil.match
576 matches = override_match(repo[None], pats, opts)
576 matches = override_match(repo[None], pats, opts)
577 orig(ui, repo, *pats, **opts)
577 orig(ui, repo, *pats, **opts)
578 finally:
578 finally:
579 restorematchfn()
579 restorematchfn()
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 printmessage=False)
582 printmessage=False)
583
583
584 # empty out the largefiles list so we start fresh next time
584 # empty out the largefiles list so we start fresh next time
585 repo._lfilestoupdate = []
585 repo._lfilestoupdate = []
586 for lfile in modified:
586 for lfile in modified:
587 if lfile in lfileslist:
587 if lfile in lfileslist:
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 in repo['.']:
589 in repo['.']:
590 lfutil.writestandin(repo, lfutil.standin(lfile),
590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 repo['.'][lfile].data().strip(),
591 repo['.'][lfile].data().strip(),
592 'x' in repo['.'][lfile].flags())
592 'x' in repo['.'][lfile].flags())
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 for lfile in added:
594 for lfile in added:
595 standin = lfutil.standin(lfile)
595 standin = lfutil.standin(lfile)
596 if standin not in ctx and (standin in matches or opts.get('all')):
596 if standin not in ctx and (standin in matches or opts.get('all')):
597 if lfile in lfdirstate:
597 if lfile in lfdirstate:
598 lfdirstate.drop(lfile)
598 lfdirstate.drop(lfile)
599 util.unlinkpath(repo.wjoin(standin))
599 util.unlinkpath(repo.wjoin(standin))
600 lfdirstate.write()
600 lfdirstate.write()
601 finally:
601 finally:
602 wlock.release()
602 wlock.release()
603
603
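The matchfn wrapper in override_revert both filters and records: every standin it accepts is remembered on the repo so only those largefiles are refreshed afterwards. A hedged sketch of that recording-predicate idea, with illustrative names:

def recording_matchfn(orig_matchfn, matched):
    def matchfn(path):
        if orig_matchfn(path):
            matched.append(path)        # remember it for the later update
            return True
        return False
    return matchfn

# usage (illustrative):
# matched = []
# m.matchfn = recording_matchfn(m.matchfn, matched)
# ... run the original revert ...
# lfcommands.updatelfiles(ui, repo, filelist=matched, printmessage=False)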
604 def hg_update(orig, repo, node):
604 def hg_update(orig, repo, node):
605 # In order to not waste a lot of extra time during the update largefiles
605 # Only call updatelfiles on the standins that have changed to save time
606 # step, we keep track of the state of the standins before and after we
607 # call the original update function, and only update the standins that
608 # have changed in the hg.update() call
609 oldstandins = lfutil.getstandinsstate(repo)
606 oldstandins = lfutil.getstandinsstate(repo)
610 result = orig(repo, node)
607 result = orig(repo, node)
611 newstandins = lfutil.getstandinsstate(repo)
608 newstandins = lfutil.getstandinsstate(repo)
612 tobeupdated = set(oldstandins).symmetric_difference(set(newstandins))
609 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
613 filelist = []
614 for f in tobeupdated:
615 if f[0] not in filelist:
616 filelist.append(f[0])
617
618 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
610 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
619 return result
611 return result
620
612
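This hunk is the point of the changeset: the inline symmetric-difference calculation is replaced by a call to the new lfutil.getlfilestoupdate utility. That helper lives in lfutil.py rather than in this file; judging from the code it replaces, its behaviour is roughly:

def getlfilestoupdate(oldstandins, newstandins):
    # each entry pairs a largefile name with its standin's content hash
    changed = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changed:
        if f[0] not in filelist:
            filelist.append(f[0])       # keep each largefile name once
    return filelist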
621 def hg_clean(orig, repo, node, show_stats=True):
613 def hg_clean(orig, repo, node, show_stats=True):
622 result = orig(repo, node, show_stats)
614 result = orig(repo, node, show_stats)
623 lfcommands.updatelfiles(repo.ui, repo)
615 lfcommands.updatelfiles(repo.ui, repo)
624 return result
616 return result
625
617
626 def hg_merge(orig, repo, node, force=None, remind=True):
618 def hg_merge(orig, repo, node, force=None, remind=True):
627 # Mark the repo as being in the middle of a merge, so that
619 # Mark the repo as being in the middle of a merge, so that
628 # updatelfiles() will know that it needs to trust the standins in
620 # updatelfiles() will know that it needs to trust the standins in
629 # the working copy, not in the standins in the current node
621 # the working copy, not in the standins in the current node
630 repo._ismerging = True
622 repo._ismerging = True
631 try:
623 try:
632 result = orig(repo, node, force, remind)
624 result = orig(repo, node, force, remind)
633 lfcommands.updatelfiles(repo.ui, repo)
625 lfcommands.updatelfiles(repo.ui, repo)
634 finally:
626 finally:
635 repo._ismerging = False
627 repo._ismerging = False
636 return result
628 return result
637
629
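hg_merge (and the rebase overrides below) guard a boolean flag on the repo with try/finally so updatelfiles() knows which state it is running in. For illustration only, the same guard expressed as a context manager; this is not part of the extension:

import contextlib

@contextlib.contextmanager
def flagged(obj, name):
    setattr(obj, name, True)
    try:
        yield
    finally:
        setattr(obj, name, False)

# equivalent to the try/finally above (illustrative):
# with flagged(repo, '_ismerging'):
#     result = orig(repo, node, force, remind)
#     lfcommands.updatelfiles(repo.ui, repo)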
638 # When we rebase a repository with remotely changed largefiles, we need to
630 # When we rebase a repository with remotely changed largefiles, we need to
639 # take some extra care so that the largefiles are correctly updated in the
631 # take some extra care so that the largefiles are correctly updated in the
640 # working copy
632 # working copy
641 def override_pull(orig, ui, repo, source=None, **opts):
633 def override_pull(orig, ui, repo, source=None, **opts):
642 if opts.get('rebase', False):
634 if opts.get('rebase', False):
643 repo._isrebasing = True
635 repo._isrebasing = True
644 try:
636 try:
645 if opts.get('update'):
637 if opts.get('update'):
646 del opts['update']
638 del opts['update']
647 ui.debug('--update and --rebase are not compatible, ignoring '
639 ui.debug('--update and --rebase are not compatible, ignoring '
648 'the update flag\n')
640 'the update flag\n')
649 del opts['rebase']
641 del opts['rebase']
650 cmdutil.bailifchanged(repo)
642 cmdutil.bailifchanged(repo)
651 revsprepull = len(repo)
643 revsprepull = len(repo)
652 origpostincoming = commands.postincoming
644 origpostincoming = commands.postincoming
653 def _dummy(*args, **kwargs):
645 def _dummy(*args, **kwargs):
654 pass
646 pass
655 commands.postincoming = _dummy
647 commands.postincoming = _dummy
656 repo.lfpullsource = source
648 repo.lfpullsource = source
657 if not source:
649 if not source:
658 source = 'default'
650 source = 'default'
659 try:
651 try:
660 result = commands.pull(ui, repo, source, **opts)
652 result = commands.pull(ui, repo, source, **opts)
661 finally:
653 finally:
662 commands.postincoming = origpostincoming
654 commands.postincoming = origpostincoming
663 revspostpull = len(repo)
655 revspostpull = len(repo)
664 if revspostpull > revsprepull:
656 if revspostpull > revsprepull:
665 result = result or rebase.rebase(ui, repo)
657 result = result or rebase.rebase(ui, repo)
666 finally:
658 finally:
667 repo._isrebasing = False
659 repo._isrebasing = False
668 else:
660 else:
669 repo.lfpullsource = source
661 repo.lfpullsource = source
670 if not source:
662 if not source:
671 source = 'default'
663 source = 'default'
672 oldheads = lfutil.getcurrentheads(repo)
664 oldheads = lfutil.getcurrentheads(repo)
673 result = orig(ui, repo, source, **opts)
665 result = orig(ui, repo, source, **opts)
674 # If we do not have the new largefiles for any new heads we pulled, we
666 # If we do not have the new largefiles for any new heads we pulled, we
675 # will run into a problem later if we try to merge or rebase with one of
667 # will run into a problem later if we try to merge or rebase with one of
676 # these heads, so cache the largefiles now directly into the system
668 # these heads, so cache the largefiles now directly into the system
677 # cache.
669 # cache.
678 ui.status(_("caching new largefiles\n"))
670 ui.status(_("caching new largefiles\n"))
679 numcached = 0
671 numcached = 0
680 heads = lfutil.getcurrentheads(repo)
672 heads = lfutil.getcurrentheads(repo)
681 newheads = set(heads).difference(set(oldheads))
673 newheads = set(heads).difference(set(oldheads))
682 for head in newheads:
674 for head in newheads:
683 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
675 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
684 numcached += len(cached)
676 numcached += len(cached)
685 ui.status(_("%d largefiles cached\n") % numcached)
677 ui.status(_("%d largefiles cached\n") % numcached)
686 return result
678 return result
687
679
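The non-rebase branch of override_pull records the heads before the pull and caches largefiles only for heads that are new afterwards. A compact sketch of that bookkeeping, with getheads and cachelfiles passed in as stand-ins for the lfutil/lfcommands helpers used above:

def cache_new_heads(ui, repo, oldheads, getheads, cachelfiles):
    numcached = 0
    for head in set(getheads(repo)) - set(oldheads):
        cached, missing = cachelfiles(ui, repo, head)
        numcached += len(cached)        # count what actually landed in the cache
    return numcached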
688 def override_rebase(orig, ui, repo, **opts):
680 def override_rebase(orig, ui, repo, **opts):
689 repo._isrebasing = True
681 repo._isrebasing = True
690 try:
682 try:
691 orig(ui, repo, **opts)
683 orig(ui, repo, **opts)
692 finally:
684 finally:
693 repo._isrebasing = False
685 repo._isrebasing = False
694
686
695 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
687 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
696 prefix=None, mtime=None, subrepos=None):
688 prefix=None, mtime=None, subrepos=None):
697 # No need to lock because we are only reading history and
689 # No need to lock because we are only reading history and
698 # largefile caches, neither of which are modified.
690 # largefile caches, neither of which are modified.
699 lfcommands.cachelfiles(repo.ui, repo, node)
691 lfcommands.cachelfiles(repo.ui, repo, node)
700
692
701 if kind not in archival.archivers:
693 if kind not in archival.archivers:
702 raise util.Abort(_("unknown archive type '%s'") % kind)
694 raise util.Abort(_("unknown archive type '%s'") % kind)
703
695
704 ctx = repo[node]
696 ctx = repo[node]
705
697
706 if kind == 'files':
698 if kind == 'files':
707 if prefix:
699 if prefix:
708 raise util.Abort(
700 raise util.Abort(
709 _('cannot give prefix when archiving to files'))
701 _('cannot give prefix when archiving to files'))
710 else:
702 else:
711 prefix = archival.tidyprefix(dest, kind, prefix)
703 prefix = archival.tidyprefix(dest, kind, prefix)
712
704
713 def write(name, mode, islink, getdata):
705 def write(name, mode, islink, getdata):
714 if matchfn and not matchfn(name):
706 if matchfn and not matchfn(name):
715 return
707 return
716 data = getdata()
708 data = getdata()
717 if decode:
709 if decode:
718 data = repo.wwritedata(name, data)
710 data = repo.wwritedata(name, data)
719 archiver.addfile(prefix + name, mode, islink, data)
711 archiver.addfile(prefix + name, mode, islink, data)
720
712
721 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
713 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
722
714
723 if repo.ui.configbool("ui", "archivemeta", True):
715 if repo.ui.configbool("ui", "archivemeta", True):
724 def metadata():
716 def metadata():
725 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
717 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
726 hex(repo.changelog.node(0)), hex(node), ctx.branch())
718 hex(repo.changelog.node(0)), hex(node), ctx.branch())
727
719
728 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
720 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
729 if repo.tagtype(t) == 'global')
721 if repo.tagtype(t) == 'global')
730 if not tags:
722 if not tags:
731 repo.ui.pushbuffer()
723 repo.ui.pushbuffer()
732 opts = {'template': '{latesttag}\n{latesttagdistance}',
724 opts = {'template': '{latesttag}\n{latesttagdistance}',
733 'style': '', 'patch': None, 'git': None}
725 'style': '', 'patch': None, 'git': None}
734 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
726 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
735 ltags, dist = repo.ui.popbuffer().split('\n')
727 ltags, dist = repo.ui.popbuffer().split('\n')
736 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
728 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
737 tags += 'latesttagdistance: %s\n' % dist
729 tags += 'latesttagdistance: %s\n' % dist
738
730
739 return base + tags
731 return base + tags
740
732
741 write('.hg_archival.txt', 0644, False, metadata)
733 write('.hg_archival.txt', 0644, False, metadata)
742
734
743 for f in ctx:
735 for f in ctx:
744 ff = ctx.flags(f)
736 ff = ctx.flags(f)
745 getdata = ctx[f].data
737 getdata = ctx[f].data
746 if lfutil.isstandin(f):
738 if lfutil.isstandin(f):
747 path = lfutil.findfile(repo, getdata().strip())
739 path = lfutil.findfile(repo, getdata().strip())
748 if path is None:
740 if path is None:
749 raise util.Abort(
741 raise util.Abort(
750 _('largefile %s not found in repo store or system cache')
742 _('largefile %s not found in repo store or system cache')
751 % lfutil.splitstandin(f))
743 % lfutil.splitstandin(f))
752 f = lfutil.splitstandin(f)
744 f = lfutil.splitstandin(f)
753
745
754 def getdatafn():
746 def getdatafn():
755 fd = None
747 fd = None
756 try:
748 try:
757 fd = open(path, 'rb')
749 fd = open(path, 'rb')
758 return fd.read()
750 return fd.read()
759 finally:
751 finally:
760 if fd:
752 if fd:
761 fd.close()
753 fd.close()
762
754
763 getdata = getdatafn
755 getdata = getdatafn
764 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
756 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
765
757
766 if subrepos:
758 if subrepos:
767 for subpath in ctx.substate:
759 for subpath in ctx.substate:
768 sub = ctx.sub(subpath)
760 sub = ctx.sub(subpath)
769 sub.archive(repo.ui, archiver, prefix)
761 sub.archive(repo.ui, archiver, prefix)
770
762
771 archiver.done()
763 archiver.done()
772
764
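Each archive entry above receives a zero-argument callable, so a largefile's contents are read from the store only at the moment the entry is written. A standalone sketch of that lazy-reader closure, with the illustrative name makegetdata:

def makegetdata(path):
    def getdatafn():
        fd = open(path, 'rb')
        try:
            return fd.read()            # read only when the archiver asks for it
        finally:
            fd.close()
    return getdatafn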
773 # If a largefile is modified, the change is not reflected in its
765 # If a largefile is modified, the change is not reflected in its
774 # standin until a commit. cmdutil.bailifchanged() raises an exception
766 # standin until a commit. cmdutil.bailifchanged() raises an exception
775 # if the repo has uncommitted changes. Wrap it to also check if
767 # if the repo has uncommitted changes. Wrap it to also check if
776 # largefiles were changed. This is used by bisect and backout.
768 # largefiles were changed. This is used by bisect and backout.
777 def override_bailifchanged(orig, repo):
769 def override_bailifchanged(orig, repo):
778 orig(repo)
770 orig(repo)
779 repo.lfstatus = True
771 repo.lfstatus = True
780 modified, added, removed, deleted = repo.status()[:4]
772 modified, added, removed, deleted = repo.status()[:4]
781 repo.lfstatus = False
773 repo.lfstatus = False
782 if modified or added or removed or deleted:
774 if modified or added or removed or deleted:
783 raise util.Abort(_('outstanding uncommitted changes'))
775 raise util.Abort(_('outstanding uncommitted changes'))
784
776
785 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
777 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
786 def override_fetch(orig, ui, repo, *pats, **opts):
778 def override_fetch(orig, ui, repo, *pats, **opts):
787 repo.lfstatus = True
779 repo.lfstatus = True
788 modified, added, removed, deleted = repo.status()[:4]
780 modified, added, removed, deleted = repo.status()[:4]
789 repo.lfstatus = False
781 repo.lfstatus = False
790 if modified or added or removed or deleted:
782 if modified or added or removed or deleted:
791 raise util.Abort(_('outstanding uncommitted changes'))
783 raise util.Abort(_('outstanding uncommitted changes'))
792 return orig(ui, repo, *pats, **opts)
784 return orig(ui, repo, *pats, **opts)
793
785
794 def override_forget(orig, ui, repo, *pats, **opts):
786 def override_forget(orig, ui, repo, *pats, **opts):
795 installnormalfilesmatchfn(repo[None].manifest())
787 installnormalfilesmatchfn(repo[None].manifest())
796 orig(ui, repo, *pats, **opts)
788 orig(ui, repo, *pats, **opts)
797 restorematchfn()
789 restorematchfn()
798 m = scmutil.match(repo[None], pats, opts)
790 m = scmutil.match(repo[None], pats, opts)
799
791
800 try:
792 try:
801 repo.lfstatus = True
793 repo.lfstatus = True
802 s = repo.status(match=m, clean=True)
794 s = repo.status(match=m, clean=True)
803 finally:
795 finally:
804 repo.lfstatus = False
796 repo.lfstatus = False
805 forget = sorted(s[0] + s[1] + s[3] + s[6])
797 forget = sorted(s[0] + s[1] + s[3] + s[6])
806 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
798 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
807
799
808 for f in forget:
800 for f in forget:
809 if lfutil.standin(f) not in repo.dirstate and not \
801 if lfutil.standin(f) not in repo.dirstate and not \
810 os.path.isdir(m.rel(lfutil.standin(f))):
802 os.path.isdir(m.rel(lfutil.standin(f))):
811 ui.warn(_('not removing %s: file is already untracked\n')
803 ui.warn(_('not removing %s: file is already untracked\n')
812 % m.rel(f))
804 % m.rel(f))
813
805
814 for f in forget:
806 for f in forget:
815 if ui.verbose or not m.exact(f):
807 if ui.verbose or not m.exact(f):
816 ui.status(_('removing %s\n') % m.rel(f))
808 ui.status(_('removing %s\n') % m.rel(f))
817
809
818 # Need to lock because standin files are deleted then removed from the
810 # Need to lock because standin files are deleted then removed from the
819 # repository and we could race in between.
811 # repository and we could race in between.
820 wlock = repo.wlock()
812 wlock = repo.wlock()
821 try:
813 try:
822 lfdirstate = lfutil.openlfdirstate(ui, repo)
814 lfdirstate = lfutil.openlfdirstate(ui, repo)
823 for f in forget:
815 for f in forget:
824 if lfdirstate[f] == 'a':
816 if lfdirstate[f] == 'a':
825 lfdirstate.drop(f)
817 lfdirstate.drop(f)
826 else:
818 else:
827 lfdirstate.remove(f)
819 lfdirstate.remove(f)
828 lfdirstate.write()
820 lfdirstate.write()
829 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
821 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
830 unlink=True)
822 unlink=True)
831 finally:
823 finally:
832 wlock.release()
824 wlock.release()
833
825
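The index arithmetic s[0] + s[1] + s[3] + s[6] in override_forget relies on the standard ordering of repo.status() results (modified, added, removed, deleted, unknown, ignored, clean). A tiny illustrative helper that spells that out; it is not part of the extension:

def forgettable(status):
    modified, added, removed, deleted, unknown, ignored, clean = status
    # the candidates override_forget considers before filtering to largefiles
    return sorted(modified + added + deleted + clean)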
834 def getoutgoinglfiles(ui, repo, dest=None, **opts):
826 def getoutgoinglfiles(ui, repo, dest=None, **opts):
835 dest = ui.expandpath(dest or 'default-push', dest or 'default')
827 dest = ui.expandpath(dest or 'default-push', dest or 'default')
836 dest, branches = hg.parseurl(dest, opts.get('branch'))
828 dest, branches = hg.parseurl(dest, opts.get('branch'))
837 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
829 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
838 if revs:
830 if revs:
839 revs = [repo.lookup(rev) for rev in revs]
831 revs = [repo.lookup(rev) for rev in revs]
840
832
841 remoteui = hg.remoteui
833 remoteui = hg.remoteui
842
834
843 try:
835 try:
844 remote = hg.repository(remoteui(repo, opts), dest)
836 remote = hg.repository(remoteui(repo, opts), dest)
845 except error.RepoError:
837 except error.RepoError:
846 return None
838 return None
847 o = lfutil.findoutgoing(repo, remote, False)
839 o = lfutil.findoutgoing(repo, remote, False)
848 if not o:
840 if not o:
849 return None
841 return None
850 o = repo.changelog.nodesbetween(o, revs)[0]
842 o = repo.changelog.nodesbetween(o, revs)[0]
851 if opts.get('newest_first'):
843 if opts.get('newest_first'):
852 o.reverse()
844 o.reverse()
853
845
854 toupload = set()
846 toupload = set()
855 for n in o:
847 for n in o:
856 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
848 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
857 ctx = repo[n]
849 ctx = repo[n]
858 files = set(ctx.files())
850 files = set(ctx.files())
859 if len(parents) == 2:
851 if len(parents) == 2:
860 mc = ctx.manifest()
852 mc = ctx.manifest()
861 mp1 = ctx.parents()[0].manifest()
853 mp1 = ctx.parents()[0].manifest()
862 mp2 = ctx.parents()[1].manifest()
854 mp2 = ctx.parents()[1].manifest()
863 for f in mp1:
855 for f in mp1:
864 if f not in mc:
856 if f not in mc:
865 files.add(f)
857 files.add(f)
866 for f in mp2:
858 for f in mp2:
867 if f not in mc:
859 if f not in mc:
868 files.add(f)
860 files.add(f)
869 for f in mc:
861 for f in mc:
870 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
862 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
871 files.add(f)
863 files.add(f)
872 toupload = toupload.union(
864 toupload = toupload.union(
873 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
865 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
874 return toupload
866 return toupload
875
867
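For merge changesets ctx.files() is not enough, so getoutgoinglfiles also compares the merge manifest against both parents. The same calculation written against plain dicts mapping filename to file node (a sketch, not the real manifest API):

def mergechangedfiles(mc, mp1, mp2, files):
    changed = set(files)
    for f in mp1:
        if f not in mc:
            changed.add(f)              # removed relative to the first parent
    for f in mp2:
        if f not in mc:
            changed.add(f)              # removed relative to the second parent
    for f in mc:
        if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
            changed.add(f)              # differs from at least one parent
    return changed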
876 def override_outgoing(orig, ui, repo, dest=None, **opts):
868 def override_outgoing(orig, ui, repo, dest=None, **opts):
877 orig(ui, repo, dest, **opts)
869 orig(ui, repo, dest, **opts)
878
870
879 if opts.pop('large', None):
871 if opts.pop('large', None):
880 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
872 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
881 if toupload is None:
873 if toupload is None:
882 ui.status(_('largefiles: No remote repo\n'))
874 ui.status(_('largefiles: No remote repo\n'))
883 else:
875 else:
884 ui.status(_('largefiles to upload:\n'))
876 ui.status(_('largefiles to upload:\n'))
885 for file in toupload:
877 for file in toupload:
886 ui.status(lfutil.splitstandin(file) + '\n')
878 ui.status(lfutil.splitstandin(file) + '\n')
887 ui.status('\n')
879 ui.status('\n')
888
880
889 def override_summary(orig, ui, repo, *pats, **opts):
881 def override_summary(orig, ui, repo, *pats, **opts):
890 try:
882 try:
891 repo.lfstatus = True
883 repo.lfstatus = True
892 orig(ui, repo, *pats, **opts)
884 orig(ui, repo, *pats, **opts)
893 finally:
885 finally:
894 repo.lfstatus = False
886 repo.lfstatus = False
895
887
896 if opts.pop('large', None):
888 if opts.pop('large', None):
897 toupload = getoutgoinglfiles(ui, repo, None, **opts)
889 toupload = getoutgoinglfiles(ui, repo, None, **opts)
898 if toupload is None:
890 if toupload is None:
899 ui.status(_('largefiles: No remote repo\n'))
891 ui.status(_('largefiles: No remote repo\n'))
900 else:
892 else:
901 ui.status(_('largefiles: %d to upload\n') % len(toupload))
893 ui.status(_('largefiles: %d to upload\n') % len(toupload))
902
894
903 def override_addremove(orig, ui, repo, *pats, **opts):
895 def override_addremove(orig, ui, repo, *pats, **opts):
904 # Get the list of missing largefiles so we can remove them
896 # Get the list of missing largefiles so we can remove them
905 lfdirstate = lfutil.openlfdirstate(ui, repo)
897 lfdirstate = lfutil.openlfdirstate(ui, repo)
906 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
898 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
907 False, False)
899 False, False)
908 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
900 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
909
901
910 # Call into the normal remove code, but let the original addremove handle
902 # Call into the normal remove code, but let the original addremove handle
911 # removing the standin. Monkey patching here makes sure
903 # removing the standin. Monkey patching here makes sure
912 # we don't remove the standin in the largefiles code, preventing a very
904 # we don't remove the standin in the largefiles code, preventing a very
913 # confused state later.
905 # confused state later.
914 if missing:
906 if missing:
915 repo._isaddremove = True
907 repo._isaddremove = True
916 remove_largefiles(ui, repo, *missing, **opts)
908 remove_largefiles(ui, repo, *missing, **opts)
917 repo._isaddremove = False
909 repo._isaddremove = False
918 # Call into the normal add code, and any files that *should* be added as
910 # Call into the normal add code, and any files that *should* be added as
919 # largefiles will be
911 # largefiles will be
920 add_largefiles(ui, repo, *pats, **opts)
912 add_largefiles(ui, repo, *pats, **opts)
921 # Now that we've handled largefiles, hand off to the original addremove
913 # Now that we've handled largefiles, hand off to the original addremove
922 # function to take care of the rest. Make sure it doesn't do anything with
914 # function to take care of the rest. Make sure it doesn't do anything with
923 # largefiles by installing a matcher that will ignore them.
915 # largefiles by installing a matcher that will ignore them.
924 installnormalfilesmatchfn(repo[None].manifest())
916 installnormalfilesmatchfn(repo[None].manifest())
925 result = orig(ui, repo, *pats, **opts)
917 result = orig(ui, repo, *pats, **opts)
926 restorematchfn()
918 restorematchfn()
927 return result
919 return result
928
920
929 # Calling purge with --all will cause the largefiles to be deleted.
921 # Calling purge with --all will cause the largefiles to be deleted.
930 # Override repo.status to prevent this from happening.
922 # Override repo.status to prevent this from happening.
931 def override_purge(orig, ui, repo, *dirs, **opts):
923 def override_purge(orig, ui, repo, *dirs, **opts):
932 oldstatus = repo.status
924 oldstatus = repo.status
933 def override_status(node1='.', node2=None, match=None, ignored=False,
925 def override_status(node1='.', node2=None, match=None, ignored=False,
934 clean=False, unknown=False, listsubrepos=False):
926 clean=False, unknown=False, listsubrepos=False):
935 r = oldstatus(node1, node2, match, ignored, clean, unknown,
927 r = oldstatus(node1, node2, match, ignored, clean, unknown,
936 listsubrepos)
928 listsubrepos)
937 lfdirstate = lfutil.openlfdirstate(ui, repo)
929 lfdirstate = lfutil.openlfdirstate(ui, repo)
938 modified, added, removed, deleted, unknown, ignored, clean = r
930 modified, added, removed, deleted, unknown, ignored, clean = r
939 unknown = [f for f in unknown if lfdirstate[f] == '?']
931 unknown = [f for f in unknown if lfdirstate[f] == '?']
940 ignored = [f for f in ignored if lfdirstate[f] == '?']
932 ignored = [f for f in ignored if lfdirstate[f] == '?']
941 return modified, added, removed, deleted, unknown, ignored, clean
933 return modified, added, removed, deleted, unknown, ignored, clean
942 repo.status = override_status
934 repo.status = override_status
943 orig(ui, repo, *dirs, **opts)
935 orig(ui, repo, *dirs, **opts)
944 repo.status = oldstatus
936 repo.status = oldstatus
945
937
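The wrapped status used by override_purge keeps a file in the unknown or ignored lists only if the largefiles dirstate also considers it untracked ('?'); anything it tracks is hidden so purge --all will not delete it. A sketch of that filter, treating lfdirstate as a mapping from filename to its state character:

def filter_untracked(files, lfdirstate):
    # keep only files the largefiles dirstate does not track
    return [f for f in files if lfdirstate[f] == '?']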
946 def override_rollback(orig, ui, repo, **opts):
938 def override_rollback(orig, ui, repo, **opts):
947 result = orig(ui, repo, **opts)
939 result = orig(ui, repo, **opts)
948 merge.update(repo, node=None, branchmerge=False, force=True,
940 merge.update(repo, node=None, branchmerge=False, force=True,
949 partial=lfutil.isstandin)
941 partial=lfutil.isstandin)
950 wlock = repo.wlock()
942 wlock = repo.wlock()
951 try:
943 try:
952 lfdirstate = lfutil.openlfdirstate(ui, repo)
944 lfdirstate = lfutil.openlfdirstate(ui, repo)
953 lfiles = lfutil.listlfiles(repo)
945 lfiles = lfutil.listlfiles(repo)
954 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
946 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
955 for file in lfiles:
947 for file in lfiles:
956 if file in oldlfiles:
948 if file in oldlfiles:
957 lfdirstate.normallookup(file)
949 lfdirstate.normallookup(file)
958 else:
950 else:
959 lfdirstate.add(file)
951 lfdirstate.add(file)
960 lfdirstate.write()
952 lfdirstate.write()
961 finally:
953 finally:
962 wlock.release()
954 wlock.release()
963 return result
955 return result
964
956
965 def override_transplant(orig, ui, repo, *revs, **opts):
957 def override_transplant(orig, ui, repo, *revs, **opts):
966 try:
958 try:
967 repo._istransplanting = True
959 repo._istransplanting = True
968 result = orig(ui, repo, *revs, **opts)
960 result = orig(ui, repo, *revs, **opts)
969 lfcommands.updatelfiles(ui, repo, filelist=None,
961 lfcommands.updatelfiles(ui, repo, filelist=None,
970 printmessage=False)
962 printmessage=False)
971 finally:
963 finally:
972 repo._istransplanting = False
964 repo._istransplanting = False
973 return result
965 return result