largefiles: optimize update speed by only updating changed largefiles...
Na'Tosha Bard
r16120:47ee41fc default
@@ -1,459 +1,467 b''
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 '''largefiles utility code: must not import other modules in this package.'''
10
11 import os
12 import errno
13 import platform
14 import shutil
15 import stat
16 import tempfile
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
20
21 shortname = '.hglf'
22 longname = 'largefiles'
23
24
25 # -- Portability wrappers ----------------------------------------------
26
27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
28     return dirstate.walk(matcher, [], unknown, ignored)
29
30 def repo_add(repo, list):
31     add = repo[None].add
32     return add(list)
33
34 def repo_remove(repo, list, unlink=False):
35     def remove(list, unlink):
36         wlock = repo.wlock()
37         try:
38             if unlink:
39                 for f in list:
40                     try:
41                         util.unlinkpath(repo.wjoin(f))
42                     except OSError, inst:
43                         if inst.errno != errno.ENOENT:
44                             raise
45             repo[None].forget(list)
46         finally:
47             wlock.release()
48     return remove(list, unlink=unlink)
49
50 def repo_forget(repo, list):
51     forget = repo[None].forget
52     return forget(list)
53
54 def findoutgoing(repo, remote, force):
55     from mercurial import discovery
56     common, _anyinc, _heads = discovery.findcommonincoming(repo,
57         remote, force=force)
58     return repo.changelog.findmissing(common)
59
60 # -- Private worker functions ------------------------------------------
61
62 def getminsize(ui, assumelfiles, opt, default=10):
63     lfsize = opt
64     if not lfsize and assumelfiles:
65         lfsize = ui.config(longname, 'minsize', default=default)
66     if lfsize:
67         try:
68             lfsize = float(lfsize)
69         except ValueError:
70             raise util.Abort(_('largefiles: size must be number (not %s)\n')
71                              % lfsize)
72     if lfsize is None:
73         raise util.Abort(_('minimum size for largefiles must be specified'))
74     return lfsize
75
76 def link(src, dest):
77     try:
78         util.oslink(src, dest)
79     except OSError:
80         # if hardlinks fail, fallback on atomic copy
81         dst = util.atomictempfile(dest)
82         for chunk in util.filechunkiter(open(src, 'rb')):
83             dst.write(chunk)
84         dst.close()
85         os.chmod(dest, os.stat(src).st_mode)
86
87 def usercachepath(ui, hash):
88     path = ui.configpath(longname, 'usercache', None)
89     if path:
90         path = os.path.join(path, hash)
91     else:
92         if os.name == 'nt':
93             appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
94             if appdata:
95                 path = os.path.join(appdata, longname, hash)
96         elif platform.system() == 'Darwin':
97             home = os.getenv('HOME')
98             if home:
99                 path = os.path.join(home, 'Library', 'Caches',
100                                     longname, hash)
101         elif os.name == 'posix':
102             path = os.getenv('XDG_CACHE_HOME')
103             if path:
104                 path = os.path.join(path, longname, hash)
105             else:
106                 home = os.getenv('HOME')
107                 if home:
108                     path = os.path.join(home, '.cache', longname, hash)
109         else:
110             raise util.Abort(_('unknown operating system: %s\n') % os.name)
111     return path
112
113 def inusercache(ui, hash):
114     path = usercachepath(ui, hash)
115     return path and os.path.exists(path)
116
117 def findfile(repo, hash):
118     if instore(repo, hash):
119         repo.ui.note(_('Found %s in store\n') % hash)
120         return storepath(repo, hash)
121     elif inusercache(repo.ui, hash):
122         repo.ui.note(_('Found %s in system cache\n') % hash)
123         path = storepath(repo, hash)
124         util.makedirs(os.path.dirname(path))
125         link(usercachepath(repo.ui, hash), path)
126         return path
127     return None
128
129 class largefiles_dirstate(dirstate.dirstate):
130     def __getitem__(self, key):
131         return super(largefiles_dirstate, self).__getitem__(unixpath(key))
132     def normal(self, f):
133         return super(largefiles_dirstate, self).normal(unixpath(f))
134     def remove(self, f):
135         return super(largefiles_dirstate, self).remove(unixpath(f))
136     def add(self, f):
137         return super(largefiles_dirstate, self).add(unixpath(f))
138     def drop(self, f):
139         return super(largefiles_dirstate, self).drop(unixpath(f))
140     def forget(self, f):
141         return super(largefiles_dirstate, self).forget(unixpath(f))
142     def normallookup(self, f):
143         return super(largefiles_dirstate, self).normallookup(unixpath(f))
144
145 def openlfdirstate(ui, repo):
146     '''
147     Return a dirstate object that tracks largefiles: i.e. its root is
148     the repo root, but it is saved in .hg/largefiles/dirstate.
149     '''
150     admin = repo.join(longname)
151     opener = scmutil.opener(admin)
152     lfdirstate = largefiles_dirstate(opener, ui, repo.root,
153                                      repo.dirstate._validate)
154
155     # If the largefiles dirstate does not exist, populate and create
156     # it. This ensures that we create it on the first meaningful
157     # largefiles operation in a new clone.
158     if not os.path.exists(os.path.join(admin, 'dirstate')):
159         util.makedirs(admin)
160         matcher = getstandinmatcher(repo)
161         for standin in dirstate_walk(repo.dirstate, matcher):
162             lfile = splitstandin(standin)
163             hash = readstandin(repo, lfile)
164             lfdirstate.normallookup(lfile)
165             try:
166                 if hash == hashfile(repo.wjoin(lfile)):
167                     lfdirstate.normal(lfile)
168             except OSError, err:
169                 if err.errno != errno.ENOENT:
170                     raise
171     return lfdirstate
172
173 def lfdirstate_status(lfdirstate, repo, rev):
174     match = match_.always(repo.root, repo.getcwd())
175     s = lfdirstate.status(match, [], False, False, False)
176     unsure, modified, added, removed, missing, unknown, ignored, clean = s
177     for lfile in unsure:
178         if repo[rev][standin(lfile)].data().strip() != \
179                 hashfile(repo.wjoin(lfile)):
180             modified.append(lfile)
181         else:
182             clean.append(lfile)
183             lfdirstate.normal(lfile)
184     return (modified, added, removed, missing, unknown, ignored, clean)
185
186 def listlfiles(repo, rev=None, matcher=None):
187     '''return a list of largefiles in the working copy or the
188     specified changeset'''
189
190     if matcher is None:
191         matcher = getstandinmatcher(repo)
192
193     # ignore unknown files in working directory
194     return [splitstandin(f)
195             for f in repo[rev].walk(matcher)
196             if rev is not None or repo.dirstate[f] != '?']
197
198 def instore(repo, hash):
199     return os.path.exists(storepath(repo, hash))
200
201 def storepath(repo, hash):
202     return repo.join(os.path.join(longname, hash))
203
204 def copyfromcache(repo, hash, filename):
205     '''Copy the specified largefile from the repo or system cache to
206     filename in the repository. Return true on success or false if the
207     file was not found in either cache (which should not happened:
208     this is meant to be called only after ensuring that the needed
209     largefile exists in the cache).'''
210     path = findfile(repo, hash)
211     if path is None:
212         return False
213     util.makedirs(os.path.dirname(repo.wjoin(filename)))
214     # The write may fail before the file is fully written, but we
215     # don't use atomic writes in the working copy.
216     shutil.copy(path, repo.wjoin(filename))
217     return True
218
219 def copytostore(repo, rev, file, uploaded=False):
220     hash = readstandin(repo, file)
221     if instore(repo, hash):
222         return
223     copytostoreabsolute(repo, repo.wjoin(file), hash)
224
225 def copyalltostore(repo, node):
226     '''Copy all largefiles in a given revision to the store'''
227
228     ctx = repo[node]
229     for filename in ctx.files():
230         if isstandin(filename) and filename in ctx.manifest():
231             realfile = splitstandin(filename)
232             copytostore(repo, ctx.node(), realfile)
233
234
235 def copytostoreabsolute(repo, file, hash):
236     util.makedirs(os.path.dirname(storepath(repo, hash)))
237     if inusercache(repo.ui, hash):
238         link(usercachepath(repo.ui, hash), storepath(repo, hash))
239     else:
240         dst = util.atomictempfile(storepath(repo, hash))
241         for chunk in util.filechunkiter(open(file, 'rb')):
242             dst.write(chunk)
243         dst.close()
244         util.copymode(file, storepath(repo, hash))
245     linktousercache(repo, hash)
246
247 def linktousercache(repo, hash):
248     path = usercachepath(repo.ui, hash)
249     if path:
250         util.makedirs(os.path.dirname(path))
251         link(storepath(repo, hash), path)
252
253 def getstandinmatcher(repo, pats=[], opts={}):
254     '''Return a match object that applies pats to the standin directory'''
255     standindir = repo.pathto(shortname)
256     if pats:
257         # patterns supplied: search standin directory relative to current dir
258         cwd = repo.getcwd()
259         if os.path.isabs(cwd):
260             # cwd is an absolute path for hg -R <reponame>
261             # work relative to the repository root in this case
262             cwd = ''
263         pats = [os.path.join(standindir, cwd, pat) for pat in pats]
264     elif os.path.isdir(standindir):
265         # no patterns: relative to repo root
266         pats = [standindir]
267     else:
268         # no patterns and no standin dir: return matcher that matches nothing
269         match = match_.match(repo.root, None, [], exact=True)
270         match.matchfn = lambda f: False
271         return match
272     return getmatcher(repo, pats, opts, showbad=False)
273
274 def getmatcher(repo, pats=[], opts={}, showbad=True):
275     '''Wrapper around scmutil.match() that adds showbad: if false,
276     neuter the match object's bad() method so it does not print any
277     warnings about missing files or directories.'''
278     match = scmutil.match(repo[None], pats, opts)
279
280     if not showbad:
281         match.bad = lambda f, msg: None
282     return match
283
284 def composestandinmatcher(repo, rmatcher):
285     '''Return a matcher that accepts standins corresponding to the
286     files accepted by rmatcher. Pass the list of files in the matcher
287     as the paths specified by the user.'''
288     smatcher = getstandinmatcher(repo, rmatcher.files())
289     isstandin = smatcher.matchfn
290     def composed_matchfn(f):
291         return isstandin(f) and rmatcher.matchfn(splitstandin(f))
292     smatcher.matchfn = composed_matchfn
293
294     return smatcher
295
296 def standin(filename):
297     '''Return the repo-relative path to the standin for the specified big
298     file.'''
299     # Notes:
300     # 1) Most callers want an absolute path, but _create_standin() needs
301     #    it repo-relative so lfadd() can pass it to repo_add().  So leave
302     #    it up to the caller to use repo.wjoin() to get an absolute path.
303     # 2) Join with '/' because that's what dirstate always uses, even on
304     #    Windows. Change existing separator to '/' first in case we are
305     #    passed filenames from an external source (like the command line).
306     return shortname + '/' + util.pconvert(filename)
307
308 def isstandin(filename):
309     '''Return true if filename is a big file standin. filename must be
310     in Mercurial's internal form (slash-separated).'''
311     return filename.startswith(shortname + '/')
312
313 def splitstandin(filename):
314     # Split on / because that's what dirstate always uses, even on Windows.
315     # Change local separator to / first just in case we are passed filenames
316     # from an external source (like the command line).
317     bits = util.pconvert(filename).split('/', 1)
318     if len(bits) == 2 and bits[0] == shortname:
319         return bits[1]
320     else:
321         return None
322
323 def updatestandin(repo, standin):
324     file = repo.wjoin(splitstandin(standin))
325     if os.path.exists(file):
326         hash = hashfile(file)
327         executable = getexecutable(file)
328         writestandin(repo, standin, hash, executable)
329
330 def readstandin(repo, filename, node=None):
331     '''read hex hash from standin for filename at given node, or working
332     directory if no node is given'''
333     return repo[node][standin(filename)].data().strip()
334
335 def writestandin(repo, standin, hash, executable):
336     '''write hash to <repo.root>/<standin>'''
337     writehash(hash, repo.wjoin(standin), executable)
338
339 def copyandhash(instream, outfile):
340     '''Read bytes from instream (iterable) and write them to outfile,
341     computing the SHA-1 hash of the data along the way. Close outfile
342     when done and return the binary hash.'''
343     hasher = util.sha1('')
344     for data in instream:
345         hasher.update(data)
346         outfile.write(data)
347
348     # Blecch: closing a file that somebody else opened is rude and
349     # wrong. But it's so darn convenient and practical! After all,
350     # outfile was opened just to copy and hash.
351     outfile.close()
352
353     return hasher.digest()
354
355 def hashrepofile(repo, file):
356     return hashfile(repo.wjoin(file))
357
358 def hashfile(file):
359     if not os.path.exists(file):
360         return ''
361     hasher = util.sha1('')
362     fd = open(file, 'rb')
363     for data in blockstream(fd):
364         hasher.update(data)
365     fd.close()
366     return hasher.hexdigest()
367
368 class limitreader(object):
369     def __init__(self, f, limit):
370         self.f = f
371         self.limit = limit
372
373     def read(self, length):
374         if self.limit == 0:
375             return ''
376         length = length > self.limit and self.limit or length
377         self.limit -= length
378         return self.f.read(length)
379
380     def close(self):
381         pass
382
383 def blockstream(infile, blocksize=128 * 1024):
384     """Generator that yields blocks of data from infile and closes infile."""
385     while True:
386         data = infile.read(blocksize)
387         if not data:
388             break
389         yield data
390     # same blecch as copyandhash() above
391     infile.close()
392
393 def writehash(hash, filename, executable):
394     util.makedirs(os.path.dirname(filename))
395     util.writefile(filename, hash + '\n')
396     os.chmod(filename, getmode(executable))
397
398 def getexecutable(filename):
399     mode = os.stat(filename).st_mode
400     return ((mode & stat.S_IXUSR) and
401             (mode & stat.S_IXGRP) and
402             (mode & stat.S_IXOTH))
403
404 def getmode(executable):
405     if executable:
406         return 0755
407     else:
408         return 0644
409
410 def urljoin(first, second, *arg):
411     def join(left, right):
412         if not left.endswith('/'):
413             left += '/'
414         if right.startswith('/'):
415             right = right[1:]
416         return left + right
417
418     url = join(first, second)
419     for a in arg:
420         url = join(url, a)
421     return url
422
423 def hexsha1(data):
424     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
425     object data"""
426     h = util.sha1()
427     for chunk in util.filechunkiter(data):
428         h.update(chunk)
429     return h.hexdigest()
430
431 def httpsendfile(ui, filename):
432     return httpconnection.httpsendfile(ui, filename, 'rb')
433
434 def unixpath(path):
435     '''Return a version of path normalized for use with the lfdirstate.'''
436     return util.pconvert(os.path.normpath(path))
437
438 def islfilesrepo(repo):
439     return ('largefiles' in repo.requirements and
440             util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
441
442 def mkstemp(repo, prefix):
443     '''Returns a file descriptor and a filename corresponding to a temporary
444     file in the repo's largefiles store.'''
445     path = repo.join(longname)
446     util.makedirs(path)
447     return tempfile.mkstemp(prefix=prefix, dir=path)
448
449 class storeprotonotcapable(Exception):
450     def __init__(self, storetypes):
451         self.storetypes = storetypes
452
453 def getcurrentheads(repo):
454     branches = repo.branchmap()
455     heads = []
456     for branch in branches:
457         newheads = repo.branchheads(branch)
458         heads = heads + newheads
459     return heads
460
461 def getstandinsstate(repo):
462 standins = []
463 matcher = getstandinmatcher(repo)
464 for standin in dirstate_walk(repo.dirstate, matcher):
465 lfile = splitstandin(standin)
466 standins.append((lfile, readstandin(repo, lfile)))
467 return standins
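
The hunk above ends with the new getstandinsstate() helper, which snapshots each tracked largefile together with the hash recorded in its standin. As a minimal sketch of the optimization named in the commit message (not part of this commit; the helper name getlfilestoupdate and the usage shown are assumptions), two such snapshots taken before and after the standins are rewritten can be compared so that only largefiles whose recorded hash actually changed are refreshed in the working copy:

def getlfilestoupdate(oldstandins, newstandins):
    # Hypothetical helper: each entry is a (lfile, standin hash) pair as
    # returned by getstandinsstate(); keep only the largefiles whose pair
    # is no longer present, i.e. the hash changed or the standin went away.
    changed = []
    for oldstandin in oldstandins:
        if oldstandin not in newstandins:
            changed.append(oldstandin[0])
    return changed

# Assumed usage around an update (illustrative only):
#   oldstandins = getstandinsstate(repo)
#   ... the update rewrites the standins ...
#   newstandins = getstandinsstate(repo)
#   filelist = getlfilestoupdate(oldstandins, newstandins)
#   # then refresh only the largefiles in filelist instead of all of them
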
@@ -1,961 +1,973 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge
15 node, archival, error, merge
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def installnormalfilesmatchfn(manifest):
25 def installnormalfilesmatchfn(manifest):
26 '''overrides scmutil.match so that the matcher it returns will ignore all
26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 largefiles'''
27 largefiles'''
28 oldmatch = None # for the closure
28 oldmatch = None # for the closure
29 def override_match(ctx, pats=[], opts={}, globbed=False,
29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 default='relpath'):
30 default='relpath'):
31 match = oldmatch(ctx, pats, opts, globbed, default)
31 match = oldmatch(ctx, pats, opts, globbed, default)
32 m = copy.copy(match)
32 m = copy.copy(match)
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 manifest)
34 manifest)
35 m._files = filter(notlfile, m._files)
35 m._files = filter(notlfile, m._files)
36 m._fmap = set(m._files)
36 m._fmap = set(m._files)
37 orig_matchfn = m.matchfn
37 orig_matchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(override_match)
40 oldmatch = installmatchfn(override_match)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 oldmatch = scmutil.match
43 oldmatch = scmutil.match
44 setattr(f, 'oldmatch', oldmatch)
44 setattr(f, 'oldmatch', oldmatch)
45 scmutil.match = f
45 scmutil.match = f
46 return oldmatch
46 return oldmatch
47
47
48 def restorematchfn():
48 def restorematchfn():
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 was called. no-op if scmutil.match is its original function.
50 was called. no-op if scmutil.match is its original function.
51
51
52 Note that n calls to installnormalfilesmatchfn will require n calls to
52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 restore matchfn to reverse'''
53 restore matchfn to reverse'''
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55
55
56 def add_largefiles(ui, repo, *pats, **opts):
56 def add_largefiles(ui, repo, *pats, **opts):
57 large = opts.pop('large', None)
57 large = opts.pop('large', None)
58 lfsize = lfutil.getminsize(
58 lfsize = lfutil.getminsize(
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60
60
61 lfmatcher = None
61 lfmatcher = None
62 if lfutil.islfilesrepo(repo):
62 if lfutil.islfilesrepo(repo):
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 if lfpats:
64 if lfpats:
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66
66
67 lfnames = []
67 lfnames = []
68 m = scmutil.match(repo[None], pats, opts)
68 m = scmutil.match(repo[None], pats, opts)
69 m.bad = lambda x, y: None
69 m.bad = lambda x, y: None
70 wctx = repo[None]
70 wctx = repo[None]
71 for f in repo.walk(m):
71 for f in repo.walk(m):
72 exact = m.exact(f)
72 exact = m.exact(f)
73 lfile = lfutil.standin(f) in wctx
73 lfile = lfutil.standin(f) in wctx
74 nfile = f in wctx
74 nfile = f in wctx
75 exists = lfile or nfile
75 exists = lfile or nfile
76
76
77 # Don't warn the user when they attempt to add a normal tracked file.
77 # Don't warn the user when they attempt to add a normal tracked file.
78 # The normal add code will do that for us.
78 # The normal add code will do that for us.
79 if exact and exists:
79 if exact and exists:
80 if lfile:
80 if lfile:
81 ui.warn(_('%s already a largefile\n') % f)
81 ui.warn(_('%s already a largefile\n') % f)
82 continue
82 continue
83
83
84 if exact or not exists:
84 if exact or not exists:
85 abovemin = (lfsize and
85 abovemin = (lfsize and
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 lfnames.append(f)
88 lfnames.append(f)
89 if ui.verbose or not exact:
89 if ui.verbose or not exact:
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91
91
92 bad = []
92 bad = []
93 standins = []
93 standins = []
94
94
95 # Need to lock, otherwise there could be a race condition between
95 # Need to lock, otherwise there could be a race condition between
96 # when standins are created and added to the repo.
96 # when standins are created and added to the repo.
97 wlock = repo.wlock()
97 wlock = repo.wlock()
98 try:
98 try:
99 if not opts.get('dry_run'):
99 if not opts.get('dry_run'):
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 for f in lfnames:
101 for f in lfnames:
102 standinname = lfutil.standin(f)
102 standinname = lfutil.standin(f)
103 lfutil.writestandin(repo, standinname, hash='',
103 lfutil.writestandin(repo, standinname, hash='',
104 executable=lfutil.getexecutable(repo.wjoin(f)))
104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 standins.append(standinname)
105 standins.append(standinname)
106 if lfdirstate[f] == 'r':
106 if lfdirstate[f] == 'r':
107 lfdirstate.normallookup(f)
107 lfdirstate.normallookup(f)
108 else:
108 else:
109 lfdirstate.add(f)
109 lfdirstate.add(f)
110 lfdirstate.write()
110 lfdirstate.write()
111 bad += [lfutil.splitstandin(f)
111 bad += [lfutil.splitstandin(f)
112 for f in lfutil.repo_add(repo, standins)
112 for f in lfutil.repo_add(repo, standins)
113 if f in m.files()]
113 if f in m.files()]
114 finally:
114 finally:
115 wlock.release()
115 wlock.release()
116 return bad
116 return bad
117
117
118 def remove_largefiles(ui, repo, *pats, **opts):
118 def remove_largefiles(ui, repo, *pats, **opts):
119 after = opts.get('after')
119 after = opts.get('after')
120 if not pats and not after:
120 if not pats and not after:
121 raise util.Abort(_('no files specified'))
121 raise util.Abort(_('no files specified'))
122 m = scmutil.match(repo[None], pats, opts)
122 m = scmutil.match(repo[None], pats, opts)
123 try:
123 try:
124 repo.lfstatus = True
124 repo.lfstatus = True
125 s = repo.status(match=m, clean=True)
125 s = repo.status(match=m, clean=True)
126 finally:
126 finally:
127 repo.lfstatus = False
127 repo.lfstatus = False
128 manifest = repo[None].manifest()
128 manifest = repo[None].manifest()
129 modified, added, deleted, clean = [[f for f in list
129 modified, added, deleted, clean = [[f for f in list
130 if lfutil.standin(f) in manifest]
130 if lfutil.standin(f) in manifest]
131 for list in [s[0], s[1], s[3], s[6]]]
131 for list in [s[0], s[1], s[3], s[6]]]
132
132
133 def warn(files, reason):
133 def warn(files, reason):
134 for f in files:
134 for f in files:
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 % (m.rel(f), reason))
136 % (m.rel(f), reason))
137
137
138 if after:
138 if after:
139 remove, forget = deleted, []
139 remove, forget = deleted, []
140 warn(modified + added + clean, _('file still exists'))
140 warn(modified + added + clean, _('file still exists'))
141 else:
141 else:
142 remove, forget = deleted + clean, []
142 remove, forget = deleted + clean, []
143 warn(modified, _('file is modified'))
143 warn(modified, _('file is modified'))
144 warn(added, _('file has been marked for add'))
144 warn(added, _('file has been marked for add'))
145
145
146 for f in sorted(remove + forget):
146 for f in sorted(remove + forget):
147 if ui.verbose or not m.exact(f):
147 if ui.verbose or not m.exact(f):
148 ui.status(_('removing %s\n') % m.rel(f))
148 ui.status(_('removing %s\n') % m.rel(f))
149
149
150 # Need to lock because standin files are deleted then removed from the
150 # Need to lock because standin files are deleted then removed from the
151 # repository and we could race inbetween.
151 # repository and we could race inbetween.
152 wlock = repo.wlock()
152 wlock = repo.wlock()
153 try:
153 try:
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 for f in remove:
155 for f in remove:
156 if not after:
156 if not after:
157 # If this is being called by addremove, notify the user that we
157 # If this is being called by addremove, notify the user that we
158 # are removing the file.
158 # are removing the file.
159 if getattr(repo, "_isaddremove", False):
159 if getattr(repo, "_isaddremove", False):
160 ui.status(_('removing %s\n' % f))
160 ui.status(_('removing %s\n' % f))
161 if os.path.exists(repo.wjoin(f)):
161 if os.path.exists(repo.wjoin(f)):
162 util.unlinkpath(repo.wjoin(f))
162 util.unlinkpath(repo.wjoin(f))
163 lfdirstate.remove(f)
163 lfdirstate.remove(f)
164 lfdirstate.write()
164 lfdirstate.write()
165 forget = [lfutil.standin(f) for f in forget]
165 forget = [lfutil.standin(f) for f in forget]
166 remove = [lfutil.standin(f) for f in remove]
166 remove = [lfutil.standin(f) for f in remove]
167 lfutil.repo_forget(repo, forget)
167 lfutil.repo_forget(repo, forget)
168 # If this is being called by addremove, let the original addremove
168 # If this is being called by addremove, let the original addremove
169 # function handle this.
169 # function handle this.
170 if not getattr(repo, "_isaddremove", False):
170 if not getattr(repo, "_isaddremove", False):
171 lfutil.repo_remove(repo, remove, unlink=True)
171 lfutil.repo_remove(repo, remove, unlink=True)
172 finally:
172 finally:
173 wlock.release()
173 wlock.release()
174
174
175 # -- Wrappers: modify existing commands --------------------------------
175 # -- Wrappers: modify existing commands --------------------------------
176
176
177 # Add works by going through the files that the user wanted to add and
177 # Add works by going through the files that the user wanted to add and
178 # checking if they should be added as largefiles. Then it makes a new
178 # checking if they should be added as largefiles. Then it makes a new
179 # matcher which matches only the normal files and runs the original
179 # matcher which matches only the normal files and runs the original
180 # version of add.
180 # version of add.
181 def override_add(orig, ui, repo, *pats, **opts):
181 def override_add(orig, ui, repo, *pats, **opts):
182 normal = opts.pop('normal')
182 normal = opts.pop('normal')
183 if normal:
183 if normal:
184 if opts.get('large'):
184 if opts.get('large'):
185 raise util.Abort(_('--normal cannot be used with --large'))
185 raise util.Abort(_('--normal cannot be used with --large'))
186 return orig(ui, repo, *pats, **opts)
186 return orig(ui, repo, *pats, **opts)
187 bad = add_largefiles(ui, repo, *pats, **opts)
187 bad = add_largefiles(ui, repo, *pats, **opts)
188 installnormalfilesmatchfn(repo[None].manifest())
188 installnormalfilesmatchfn(repo[None].manifest())
189 result = orig(ui, repo, *pats, **opts)
189 result = orig(ui, repo, *pats, **opts)
190 restorematchfn()
190 restorematchfn()
191
191
192 return (result == 1 or bad) and 1 or 0
192 return (result == 1 or bad) and 1 or 0
193
193
194 def override_remove(orig, ui, repo, *pats, **opts):
194 def override_remove(orig, ui, repo, *pats, **opts):
195 installnormalfilesmatchfn(repo[None].manifest())
195 installnormalfilesmatchfn(repo[None].manifest())
196 orig(ui, repo, *pats, **opts)
196 orig(ui, repo, *pats, **opts)
197 restorematchfn()
197 restorematchfn()
198 remove_largefiles(ui, repo, *pats, **opts)
198 remove_largefiles(ui, repo, *pats, **opts)
199
199
200 def override_status(orig, ui, repo, *pats, **opts):
200 def override_status(orig, ui, repo, *pats, **opts):
201 try:
201 try:
202 repo.lfstatus = True
202 repo.lfstatus = True
203 return orig(ui, repo, *pats, **opts)
203 return orig(ui, repo, *pats, **opts)
204 finally:
204 finally:
205 repo.lfstatus = False
205 repo.lfstatus = False
206
206
207 def override_log(orig, ui, repo, *pats, **opts):
207 def override_log(orig, ui, repo, *pats, **opts):
208 try:
208 try:
209 repo.lfstatus = True
209 repo.lfstatus = True
210 orig(ui, repo, *pats, **opts)
210 orig(ui, repo, *pats, **opts)
211 finally:
211 finally:
212 repo.lfstatus = False
212 repo.lfstatus = False
213
213
214 def override_verify(orig, ui, repo, *pats, **opts):
214 def override_verify(orig, ui, repo, *pats, **opts):
215 large = opts.pop('large', False)
215 large = opts.pop('large', False)
216 all = opts.pop('lfa', False)
216 all = opts.pop('lfa', False)
217 contents = opts.pop('lfc', False)
217 contents = opts.pop('lfc', False)
218
218
219 result = orig(ui, repo, *pats, **opts)
219 result = orig(ui, repo, *pats, **opts)
220 if large:
220 if large:
221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 return result
222 return result
223
223
224 # Override needs to refresh standins so that update's normal merge
224 # Override needs to refresh standins so that update's normal merge
225 # will go through properly. Then the other update hook (overriding repo.update)
225 # will go through properly. Then the other update hook (overriding repo.update)
226 # will get the new files. Filemerge is also overriden so that the merge
226 # will get the new files. Filemerge is also overriden so that the merge
227 # will merge standins correctly.
227 # will merge standins correctly.
228 def override_update(orig, ui, repo, *pats, **opts):
228 def override_update(orig, ui, repo, *pats, **opts):
229 lfdirstate = lfutil.openlfdirstate(ui, repo)
229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 False, False)
231 False, False)
232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233
233
234 # Need to lock between the standins getting updated and their
234 # Need to lock between the standins getting updated and their
235 # largefiles getting updated
235 # largefiles getting updated
236 wlock = repo.wlock()
236 wlock = repo.wlock()
237 try:
237 try:
238 if opts['check']:
238 if opts['check']:
239 mod = len(modified) > 0
239 mod = len(modified) > 0
240 for lfile in unsure:
240 for lfile in unsure:
241 standin = lfutil.standin(lfile)
241 standin = lfutil.standin(lfile)
242 if repo['.'][standin].data().strip() != \
242 if repo['.'][standin].data().strip() != \
243 lfutil.hashfile(repo.wjoin(lfile)):
243 lfutil.hashfile(repo.wjoin(lfile)):
244 mod = True
244 mod = True
245 else:
245 else:
246 lfdirstate.normal(lfile)
246 lfdirstate.normal(lfile)
247 lfdirstate.write()
247 lfdirstate.write()
248 if mod:
248 if mod:
249 raise util.Abort(_('uncommitted local changes'))
249 raise util.Abort(_('uncommitted local changes'))
250 # XXX handle removed differently
250 # XXX handle removed differently
251 if not opts['clean']:
251 if not opts['clean']:
252 for lfile in unsure + modified + added:
252 for lfile in unsure + modified + added:
253 lfutil.updatestandin(repo, lfutil.standin(lfile))
253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 finally:
254 finally:
255 wlock.release()
255 wlock.release()
256 return orig(ui, repo, *pats, **opts)
256 return orig(ui, repo, *pats, **opts)
257
257
258 # Before starting the manifest merge, merge.updates will call
258 # Before starting the manifest merge, merge.updates will call
259 # _checkunknown to check if there are any files in the merged-in
259 # _checkunknown to check if there are any files in the merged-in
260 # changeset that collide with unknown files in the working copy.
260 # changeset that collide with unknown files in the working copy.
261 #
261 #
262 # The largefiles are seen as unknown, so this prevents us from merging
262 # The largefiles are seen as unknown, so this prevents us from merging
263 # in a file 'foo' if we already have a largefile with the same name.
263 # in a file 'foo' if we already have a largefile with the same name.
264 #
264 #
265 # The overridden function filters the unknown files by removing any
265 # The overridden function filters the unknown files by removing any
266 # largefiles. This makes the merge proceed and we can then handle this
266 # largefiles. This makes the merge proceed and we can then handle this
267 # case further in the overridden manifestmerge function below.
267 # case further in the overridden manifestmerge function below.
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
269 if lfutil.standin(f) in wctx:
269 if lfutil.standin(f) in wctx:
270 return False
270 return False
271 return origfn(repo, wctx, mctx, f)
271 return origfn(repo, wctx, mctx, f)
272
272
273 # The manifest merge handles conflicts on the manifest level. We want
273 # The manifest merge handles conflicts on the manifest level. We want
274 # to handle changes in largefile-ness of files at this level too.
274 # to handle changes in largefile-ness of files at this level too.
275 #
275 #
276 # The strategy is to run the original manifestmerge and then process
276 # The strategy is to run the original manifestmerge and then process
277 # the action list it outputs. There are two cases we need to deal with:
277 # the action list it outputs. There are two cases we need to deal with:
278 #
278 #
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 # detected via its standin file, which will enter the working copy
280 # detected via its standin file, which will enter the working copy
281 # with a "get" action. It is not "merge" since the standin is all
281 # with a "get" action. It is not "merge" since the standin is all
282 # Mercurial is concerned with at this level -- the link to the
282 # Mercurial is concerned with at this level -- the link to the
283 # existing normal file is not relevant here.
283 # existing normal file is not relevant here.
284 #
284 #
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 # since the largefile will be present in the working copy and
286 # since the largefile will be present in the working copy and
287 # different from the normal file in p2. Mercurial therefore
287 # different from the normal file in p2. Mercurial therefore
288 # triggers a merge action.
288 # triggers a merge action.
289 #
289 #
290 # In both cases, we prompt the user and emit new actions to either
290 # In both cases, we prompt the user and emit new actions to either
291 # remove the standin (if the normal file was kept) or to remove the
291 # remove the standin (if the normal file was kept) or to remove the
292 # normal file and get the standin (if the largefile was kept). The
292 # normal file and get the standin (if the largefile was kept). The
293 # default prompt answer is to use the largefile version since it was
293 # default prompt answer is to use the largefile version since it was
294 # presumably changed on purpose.
294 # presumably changed on purpose.
295 #
295 #
296 # Finally, the merge.applyupdates function will then take care of
296 # Finally, the merge.applyupdates function will then take care of
297 # writing the files into the working copy and lfcommands.updatelfiles
297 # writing the files into the working copy and lfcommands.updatelfiles
298 # will update the largefiles.
298 # will update the largefiles.
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
301 processed = []
301 processed = []
302
302
303 for action in actions:
303 for action in actions:
304 if overwrite:
304 if overwrite:
305 processed.append(action)
305 processed.append(action)
306 continue
306 continue
307 f, m = action[:2]
307 f, m = action[:2]
308
308
309 choices = (_('&Largefile'), _('&Normal file'))
309 choices = (_('&Largefile'), _('&Normal file'))
310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311 # Case 1: normal file in the working copy, largefile in
311 # Case 1: normal file in the working copy, largefile in
312 # the second parent
312 # the second parent
313 lfile = lfutil.splitstandin(f)
313 lfile = lfutil.splitstandin(f)
314 standin = f
314 standin = f
315 msg = _('%s has been turned into a largefile\n'
315 msg = _('%s has been turned into a largefile\n'
316 'use (l)argefile or keep as (n)ormal file?') % lfile
316 'use (l)argefile or keep as (n)ormal file?') % lfile
317 if repo.ui.promptchoice(msg, choices, 0) == 0:
317 if repo.ui.promptchoice(msg, choices, 0) == 0:
318 processed.append((lfile, "r"))
318 processed.append((lfile, "r"))
319 processed.append((standin, "g", p2.flags(standin)))
319 processed.append((standin, "g", p2.flags(standin)))
320 else:
320 else:
321 processed.append((standin, "r"))
321 processed.append((standin, "r"))
322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323 # Case 2: largefile in the working copy, normal file in
323 # Case 2: largefile in the working copy, normal file in
324 # the second parent
324 # the second parent
325 standin = lfutil.standin(f)
325 standin = lfutil.standin(f)
326 lfile = f
326 lfile = f
327 msg = _('%s has been turned into a normal file\n'
327 msg = _('%s has been turned into a normal file\n'
328 'keep as (l)argefile or use (n)ormal file?') % lfile
328 'keep as (l)argefile or use (n)ormal file?') % lfile
329 if repo.ui.promptchoice(msg, choices, 0) == 0:
329 if repo.ui.promptchoice(msg, choices, 0) == 0:
330 processed.append((lfile, "r"))
330 processed.append((lfile, "r"))
331 else:
331 else:
332 processed.append((standin, "r"))
332 processed.append((standin, "r"))
333 processed.append((lfile, "g", p2.flags(lfile)))
333 processed.append((lfile, "g", p2.flags(lfile)))
334 else:
334 else:
335 processed.append(action)
335 processed.append(action)
336
336
337 return processed
337 return processed

# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))

        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                  'keep (l)ocal or take (o)ther?') %
                                lfutil.splitstandin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
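
The branch above only falls through to the prompt when neither side can win automatically. A minimal standalone sketch of that decision order, using plain strings instead of file contexts (the function name and return strings are invented for illustration, not part of the changeset):

def choose_largefile(dest, other, ancestor):
    # Same order of checks as override_filemerge: identical contents need
    # no action, a side that still matches the ancestor loses to the side
    # that changed, and only a double change needs a prompt.
    if other == dest:
        return 'no action (identical)'
    if other == ancestor:
        return 'keep local (only local side changed)'
    if dest == ancestor:
        return 'take other (only other side changed)'
    return 'conflict: ask the user'

assert choose_largefile('v2', 'v1', 'v1') == 'keep local (only local side changed)'
assert choose_largefile('v1', 'v2', 'v1') == 'take other (only other side changed)'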

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile; in that override it
# checks whether the destination largefile already exists. It also keeps
# a list of copied files so that the largefiles can be copied and the
# dirstate updated.
def override_copy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            if str(e) != 'no files to copy':
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't
            # add them to the dirstate until later, so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                               default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       orig_matchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(override_match)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def override_copyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = override_copyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(destlfile) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(srclfile, destlfile)
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != 'no files to copy':
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result

# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def override_revert(orig, ui, repo, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = repo[opts.get('rev')]
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                               default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if orig_matchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return orig_matchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(override_match)
            scmutil.match
            matches = override_match(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        for lfile in modified:
            if lfile in lfileslist:
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()

def hg_update(orig, repo, node):
    # In order to not waste a lot of extra time during the update largefiles
    # step, we keep track of the state of the standins before and after we
    # call the original update function, and only update the standins that
    # have changed in the hg.update() call
    oldstandins = lfutil.getstandinsstate(repo)
    result = orig(repo, node)
    newstandins = lfutil.getstandinsstate(repo)
    tobeupdated = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in tobeupdated:
        if f[0] not in filelist:
            filelist.append(f[0])

    lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
    return result
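
The optimization in hg_update boils down to a set symmetric difference over what appear to be (largefile name, standin hash) pairs returned by getstandinsstate. A small standalone sketch with invented filenames and hashes:

old = [('big.bin', 'a1'), ('data.iso', 'b2'), ('video.mov', 'c3')]
new = [('big.bin', 'a1'), ('data.iso', 'd4'), ('extra.bin', 'e5')]
changed = set(old).symmetric_difference(set(new))
names = []
for f in changed:
    if f[0] not in names:
        names.append(f[0])
# names now holds (in arbitrary order) 'data.iso', 'extra.bin' and
# 'video.mov' -- everything except the untouched 'big.bin' -- so only
# those largefiles would be handed to updatelfiles().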

def hg_clean(orig, repo, node, show_stats=True):
    result = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return result

def hg_merge(orig, repo, node, force=None, remind=True):
    # Mark the repo as being in the middle of a merge, so that
    # updatelfiles() will know that it needs to trust the standins in
    # the working copy, not in the standins in the current node
    repo._ismerging = True
    try:
        result = orig(repo, node, force, remind)
        lfcommands.updatelfiles(repo.ui, repo)
    finally:
        repo._ismerging = False
    return result

# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def override_pull(orig, ui, repo, source=None, **opts):
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = 'default'
        oldheads = lfutil.getcurrentheads(repo)
        result = orig(ui, repo, source, **opts)
        # If we do not have the new largefiles for any new heads we pulled, we
        # will run into a problem later if we try to merge or rebase with one
        # of these heads, so cache the largefiles now directly into the system
        # cache.
        ui.status(_("caching new largefiles\n"))
        numcached = 0
        heads = lfutil.getcurrentheads(repo)
        newheads = set(heads).difference(set(oldheads))
        for head in newheads:
            (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
            numcached += len(cached)
        ui.status(_("%d largefiles cached\n" % numcached))
    return result
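
The bookkeeping in the non-rebase branch is just a set difference over the repository heads before and after the pull, so largefiles are cached only for heads that did not exist locally beforehand. A tiny standalone sketch with invented head hashes:

oldheads = ['1111aaaa', '2222bbbb']
heads = ['1111aaaa', '2222bbbb', '3333cccc']
newheads = set(heads).difference(set(oldheads))
# newheads == {'3333cccc'}: cachelfiles() would run once, for the single
# head that appeared during the pull.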

def override_rebase(orig, ui, repo, **opts):
    repo._isrebasing = True
    try:
        orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False

def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            sub.archive(repo.ui, archiver, prefix)

    archiver.done()

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def override_bailifchanged(orig, repo):
    orig(repo)
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))

# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

def override_forget(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
            unlink=True)
    finally:
        wlock.release()

def getoutgoinglfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return toupload

def override_outgoing(orig, ui, repo, dest=None, **opts):
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in toupload:
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')

def override_summary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, None, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles: %d to upload\n') % len(toupload))

def override_addremove(orig, ui, repo, *pats, **opts):
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but leave the removal of the standins
    # themselves to the original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if missing:
        repo._isaddremove = True
        remove_largefiles(ui, repo, *missing, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    add_largefiles(ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    return result

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def override_purge(orig, ui, repo, *dirs, **opts):
    oldstatus = repo.status
    def override_status(node1='.', node2=None, match=None, ignored=False,
                        clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = override_status
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

def override_rollback(orig, ui, repo, **opts):
    result = orig(ui, repo, **opts)
    merge.update(repo, node=None, branchmerge=False, force=True,
        partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for file in lfiles:
            if file in oldlfiles:
                lfdirstate.normallookup(file)
            else:
                lfdirstate.add(file)
        lfdirstate.write()
    finally:
        wlock.release()
    return result

def override_transplant(orig, ui, repo, *revs, **opts):
    try:
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        lfcommands.updatelfiles(ui, repo, filelist=None,
                                printmessage=False)
    finally:
        repo._istransplanting = False
    return result
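
None of these override_* and hg_* wrappers does anything on its own; in the largefiles extension they are presumably installed from the extension's setup code using Mercurial's wrapping helpers. A rough sketch of that wiring, under the assumption that extensions.wrapfunction and extensions.wrapcommand are the mechanism used; the exact list of wrapped names below is illustrative, not taken from this changeset:

from mercurial import cmdutil, commands, extensions, filemerge, hg

def uisetup(ui):
    # Plain library functions are wrapped directly...
    extensions.wrapfunction(filemerge, 'filemerge', override_filemerge)
    extensions.wrapfunction(cmdutil, 'copy', override_copy)
    extensions.wrapfunction(hg, 'update', hg_update)
    extensions.wrapfunction(hg, 'clean', hg_clean)
    extensions.wrapfunction(hg, 'merge', hg_merge)
    # ...while user-visible commands are wrapped through the command table.
    extensions.wrapcommand(commands.table, 'revert', override_revert)
    extensions.wrapcommand(commands.table, 'pull', override_pull)
    extensions.wrapcommand(commands.table, 'rollback', override_rollback)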