##// END OF EJS Templates
largefiles: remove trivial portability wrappers
Mads Kiilerich -
r18154:93c697d9 default
parent child Browse files
Show More
@@ -1,443 +1,430
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 shortnameslash = shortname + '/'
21 shortnameslash = shortname + '/'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
# -- Portability wrappers ----------------------------------------------

def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    # Thin wrapper over dirstate.walk with an empty subrepo list; callers
    # vary only the unknown/ignored flags.
    return dirstate.walk(matcher, [], unknown, ignored)

def repoadd(repo, list):
    # Schedule the given repo-relative paths for addition via the
    # working-directory context.
    add = repo[None].add
    return add(list)

def repoforget(repo, list):
    # Stop tracking the given paths via the working-directory context.
    forget = repo[None].forget
    return forget(list)
38 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
39
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size (in MB) as a float.

    The explicit command-line value ``opt`` wins; otherwise, when
    ``assumelfiles`` is set, fall back to the ``[largefiles] minsize``
    config entry (or ``default``).  Aborts when the value is not a
    number or no value can be determined at all.
    '''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
53
40
def link(src, dest):
    '''Hardlink src to dest; if hardlinking fails, fall back to an
    atomic copy that preserves the source's permission bits.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        # copy the mode so e.g. executable largefiles stay executable
        os.chmod(dest, os.stat(src).st_mode)
64
51
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, or None when no cache location can be determined.

    An explicit ``[largefiles] usercache`` config path wins.  Otherwise
    use the platform convention: LOCALAPPDATA/APPDATA on Windows,
    ~/Library/Caches on Mac OS X, and XDG_CACHE_HOME or ~/.cache on
    other POSIX systems.  Aborts on an unrecognized os.name.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) profile when available
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    # NOTE: may still be None when no suitable environment variable is set
    return path
90
77
def inusercache(ui, hash):
    '''True when the largefile with the given hash is present in the
    per-user cache (and a cache location could be determined).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
94
81
def findfile(repo, hash):
    '''Return a local path to the largefile with the given hash, copying
    it from the user cache into the repo store if necessary; return
    None when the file is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        # populate the repo store from the user cache (hardlink or copy)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
106
93
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass that normalizes every path through unixpath()
    so entries are always stored slash-separated, even when callers pass
    OS-native paths.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self):
        # largefiles are never ignored by this dirstate
        return False
124
111
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    With create=True (the default), a missing largefiles dirstate is
    populated from the standins currently tracked by the repo dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in repo.dirstate.walk(matcher, [], False, False):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy matches the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing largefile in the working copy is fine;
                # the entry simply stays in "normallookup" state
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
152
139
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Resolve the "unsure" entries of the largefiles dirstate against
    rev and return a standard status tuple (modified, added, removed,
    missing, unknown, ignored, clean) — without the unsure list.'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        # compare the hash recorded in the standin at rev against the
        # actual hash of the working-copy largefile
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            # remember the clean state so the next status is cheaper
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
165
152
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    lfiles = []
    for f in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
177
164
def instore(repo, hash):
    '''True when the largefile with the given hash is in the repo store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
180
167
def storepath(repo, hash):
    '''Return the repo-store path (.hg/largefiles/<hash>) for the
    largefile with the given hash.'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
183
170
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    # findfile may also populate the repo store from the user cache
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
198
185
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing behind `file` at `rev` into the repo
    store, unless it is already there.'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
204
191
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        # only standins that actually exist in this revision's manifest
        if not isstandin(filename) or filename not in ctx.manifest():
            continue
        copytostore(repo, ctx.node(), splitstandin(filename))
213
200
214
201
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path `file` into the repo store
    under its hash, preferring a (hard)link from the user cache.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        # NOTE(review): during conversion (_isconverting) the copy is
        # skipped entirely — presumably handled elsewhere; confirm against
        # lfconvert before relying on this.
        # atomic write so a partial copy never looks like a valid store file
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
226
213
def linktousercache(repo, hash):
    '''Link (or copy) the store file for `hash` into the per-user cache,
    when a user cache location is configured or derivable.'''
    dest = usercachepath(repo.ui, hash)
    if dest:
        util.makedirs(os.path.dirname(dest))
        link(storepath(repo, hash), dest)
232
219
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # Fix: the original used mutable default arguments (pats=[], opts={}),
    # which are shared across calls — a latent aliasing hazard.  None
    # sentinels preserve the interface and behavior.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        return match_.match(repo.root, None, [], exact=True)

    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
255
242
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its real (split) name must satisfy rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
267
254
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.

    Notes:
    1) Some callers want an absolute path, but for instance addlargefiles
       needs it repo-relative so it can be passed to repo[None].add().  So
       leave it up to the caller to use repo.wjoin() to get an absolute path.
    2) Join with '/' because that's what dirstate always uses, even on
       Windows.  Any existing OS-native separators are normalized first in
       case we are passed filenames from an external source (like the
       command line).
    '''
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
279
266
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename.startswith(prefix)
284
271
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    the path is not a standin.

    Split on / because that's what dirstate always uses, even on Windows.
    Local separators are converted to / first in case we are passed
    filenames from an external source (like the command line).
    '''
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) != 2 or bits[0] != shortname:
        return None
    return bits[1]
294
281
def updatestandin(repo, standin):
    '''Re-hash the working-copy largefile behind `standin` and rewrite the
    standin file; a missing largefile leaves the standin untouched.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        # the standin also records the largefile's executable bit
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
301
288
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    standinfile = standin(filename)
    return repo[node][standinfile].data().strip()
306
293
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>, with the file mode derived
    from the executable flag'''
    writehash(hash, repo.wjoin(standin), executable)
310
297
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    # binary digest — callers wanting hex must encode it themselves
    return hasher.digest()
326
313
def hashrepofile(repo, file):
    '''Return the hex SHA-1 of the working-copy file ('' when the file
    does not exist — see hashfile).'''
    return hashfile(repo.wjoin(file))
329
316
def hashfile(file):
    '''Return the hex SHA-1 digest of the file's contents, or '' when the
    file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() already closes fd on exhaustion; this second close
    # is a harmless no-op
    fd.close()
    return hasher.hexdigest()
339
326
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of `f`.

    read() never returns more than the remaining budget; once the budget
    is exhausted it returns the empty string.  close() is deliberately a
    no-op: the caller retains ownership of the underlying file.
    '''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        # Fix: clamp with min() instead of the old pre-ternary
        # "length > self.limit and self.limit or length" and/or hack,
        # which is harder to read and fragile around falsy operands.
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass
354
341
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        block = infile.read(blocksize)
        if not block:
            # exhausted: close the file on behalf of the caller
            # (same blecch as copyandhash() above)
            break
        yield block
    infile.close()
364
351
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the mode per the executable flag.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
369
356
def getexecutable(filename):
    '''Truthy when the file has execute permission for user, group AND
    other.  Note the result is an int-ish truthy value, not a bool.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
375
362
def getmode(executable):
    # File mode for standins/largefiles: rwxr-xr-x when executable,
    # rw-r--r-- otherwise (Python 2 octal literals).
    if executable:
        return 0755
    else:
        return 0644
381
368
def urljoin(first, second, *arg):
    '''Join URL components, guaranteeing exactly one slash between
    consecutive parts.'''
    def _glue(left, right):
        sep = '' if left.endswith('/') else '/'
        trimmed = right[1:] if right.startswith('/') else right
        return left + sep + trimmed

    url = _glue(first, second)
    for piece in arg:
        url = _glue(url, piece)
    return url
394
381
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    # stream in chunks so large files are never read into memory at once
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
402
389
def httpsendfile(ui, filename):
    '''Return an httpconnection.httpsendfile wrapper opened in binary
    read mode, suitable for uploading filename over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
405
392
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '.'/'..' components; pconvert forces '/' separators
    return util.pconvert(os.path.normpath(path))
409
396
def islfilesrepo(repo):
    '''Report whether the repo contains largefiles: the requirement must
    be set and either store data files or lfdirstate entries exist.'''
    if ('largefiles' in repo.requirements and
        util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # fall back to scanning the largefiles dirstate without creating it
    return util.any(openlfdirstate(repo.ui, repo, False))
416
403
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be satisfied.'''
    def __init__(self, storetypes):
        # list of store types the remote did not support
        self.storetypes = storetypes
420
407
def getcurrentheads(repo):
    '''Return the head changesets of every branch in the repo, flattened
    into a single list.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
428
415
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin currently
    tracked by the repo dirstate.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        standins.append((lfile, readstandin(repo, lfile)))
    return standins
436
423
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the (deduplicated) largefile names whose (name, hash) entry
    differs between the two standin-state lists.

    Note: iteration order over the symmetric difference is set-dependent,
    so the order of the result is unspecified.
    '''
    changed = set(oldstandins) ^ set(newstandins)
    names = []
    for entry in changed:
        name = entry[0]
        if name not in names:
            names.append(name)
    return names
@@ -1,1141 +1,1141
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge, discovery
15 node, archival, error, merge, discovery
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def installnormalfilesmatchfn(manifest):
25 def installnormalfilesmatchfn(manifest):
26 '''overrides scmutil.match so that the matcher it returns will ignore all
26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 largefiles'''
27 largefiles'''
28 oldmatch = None # for the closure
28 oldmatch = None # for the closure
29 def overridematch(ctx, pats=[], opts={}, globbed=False,
29 def overridematch(ctx, pats=[], opts={}, globbed=False,
30 default='relpath'):
30 default='relpath'):
31 match = oldmatch(ctx, pats, opts, globbed, default)
31 match = oldmatch(ctx, pats, opts, globbed, default)
32 m = copy.copy(match)
32 m = copy.copy(match)
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 manifest)
34 manifest)
35 m._files = filter(notlfile, m._files)
35 m._files = filter(notlfile, m._files)
36 m._fmap = set(m._files)
36 m._fmap = set(m._files)
37 origmatchfn = m.matchfn
37 origmatchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(overridematch)
40 oldmatch = installmatchfn(overridematch)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 oldmatch = scmutil.match
43 oldmatch = scmutil.match
44 setattr(f, 'oldmatch', oldmatch)
44 setattr(f, 'oldmatch', oldmatch)
45 scmutil.match = f
45 scmutil.match = f
46 return oldmatch
46 return oldmatch
47
47
48 def restorematchfn():
48 def restorematchfn():
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 was called. no-op if scmutil.match is its original function.
50 was called. no-op if scmutil.match is its original function.
51
51
52 Note that n calls to installnormalfilesmatchfn will require n calls to
52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 restore matchfn to reverse'''
53 restore matchfn to reverse'''
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55
55
56 def addlargefiles(ui, repo, *pats, **opts):
56 def addlargefiles(ui, repo, *pats, **opts):
57 large = opts.pop('large', None)
57 large = opts.pop('large', None)
58 lfsize = lfutil.getminsize(
58 lfsize = lfutil.getminsize(
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60
60
61 lfmatcher = None
61 lfmatcher = None
62 if lfutil.islfilesrepo(repo):
62 if lfutil.islfilesrepo(repo):
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 if lfpats:
64 if lfpats:
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66
66
67 lfnames = []
67 lfnames = []
68 m = scmutil.match(repo[None], pats, opts)
68 m = scmutil.match(repo[None], pats, opts)
69 m.bad = lambda x, y: None
69 m.bad = lambda x, y: None
70 wctx = repo[None]
70 wctx = repo[None]
71 for f in repo.walk(m):
71 for f in repo.walk(m):
72 exact = m.exact(f)
72 exact = m.exact(f)
73 lfile = lfutil.standin(f) in wctx
73 lfile = lfutil.standin(f) in wctx
74 nfile = f in wctx
74 nfile = f in wctx
75 exists = lfile or nfile
75 exists = lfile or nfile
76
76
77 # Don't warn the user when they attempt to add a normal tracked file.
77 # Don't warn the user when they attempt to add a normal tracked file.
78 # The normal add code will do that for us.
78 # The normal add code will do that for us.
79 if exact and exists:
79 if exact and exists:
80 if lfile:
80 if lfile:
81 ui.warn(_('%s already a largefile\n') % f)
81 ui.warn(_('%s already a largefile\n') % f)
82 continue
82 continue
83
83
84 if (exact or not exists) and not lfutil.isstandin(f):
84 if (exact or not exists) and not lfutil.isstandin(f):
85 wfile = repo.wjoin(f)
85 wfile = repo.wjoin(f)
86
86
87 # In case the file was removed previously, but not committed
87 # In case the file was removed previously, but not committed
88 # (issue3507)
88 # (issue3507)
89 if not os.path.exists(wfile):
89 if not os.path.exists(wfile):
90 continue
90 continue
91
91
92 abovemin = (lfsize and
92 abovemin = (lfsize and
93 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
93 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
94 if large or abovemin or (lfmatcher and lfmatcher(f)):
94 if large or abovemin or (lfmatcher and lfmatcher(f)):
95 lfnames.append(f)
95 lfnames.append(f)
96 if ui.verbose or not exact:
96 if ui.verbose or not exact:
97 ui.status(_('adding %s as a largefile\n') % m.rel(f))
97 ui.status(_('adding %s as a largefile\n') % m.rel(f))
98
98
99 bad = []
99 bad = []
100 standins = []
100 standins = []
101
101
102 # Need to lock, otherwise there could be a race condition between
102 # Need to lock, otherwise there could be a race condition between
103 # when standins are created and added to the repo.
103 # when standins are created and added to the repo.
104 wlock = repo.wlock()
104 wlock = repo.wlock()
105 try:
105 try:
106 if not opts.get('dry_run'):
106 if not opts.get('dry_run'):
107 lfdirstate = lfutil.openlfdirstate(ui, repo)
107 lfdirstate = lfutil.openlfdirstate(ui, repo)
108 for f in lfnames:
108 for f in lfnames:
109 standinname = lfutil.standin(f)
109 standinname = lfutil.standin(f)
110 lfutil.writestandin(repo, standinname, hash='',
110 lfutil.writestandin(repo, standinname, hash='',
111 executable=lfutil.getexecutable(repo.wjoin(f)))
111 executable=lfutil.getexecutable(repo.wjoin(f)))
112 standins.append(standinname)
112 standins.append(standinname)
113 if lfdirstate[f] == 'r':
113 if lfdirstate[f] == 'r':
114 lfdirstate.normallookup(f)
114 lfdirstate.normallookup(f)
115 else:
115 else:
116 lfdirstate.add(f)
116 lfdirstate.add(f)
117 lfdirstate.write()
117 lfdirstate.write()
118 bad += [lfutil.splitstandin(f)
118 bad += [lfutil.splitstandin(f)
119 for f in lfutil.repoadd(repo, standins)
119 for f in repo[None].add(standins)
120 if f in m.files()]
120 if f in m.files()]
121 finally:
121 finally:
122 wlock.release()
122 wlock.release()
123 return bad
123 return bad
124
124
125 def removelargefiles(ui, repo, *pats, **opts):
125 def removelargefiles(ui, repo, *pats, **opts):
126 after = opts.get('after')
126 after = opts.get('after')
127 if not pats and not after:
127 if not pats and not after:
128 raise util.Abort(_('no files specified'))
128 raise util.Abort(_('no files specified'))
129 m = scmutil.match(repo[None], pats, opts)
129 m = scmutil.match(repo[None], pats, opts)
130 try:
130 try:
131 repo.lfstatus = True
131 repo.lfstatus = True
132 s = repo.status(match=m, clean=True)
132 s = repo.status(match=m, clean=True)
133 finally:
133 finally:
134 repo.lfstatus = False
134 repo.lfstatus = False
135 manifest = repo[None].manifest()
135 manifest = repo[None].manifest()
136 modified, added, deleted, clean = [[f for f in list
136 modified, added, deleted, clean = [[f for f in list
137 if lfutil.standin(f) in manifest]
137 if lfutil.standin(f) in manifest]
138 for list in [s[0], s[1], s[3], s[6]]]
138 for list in [s[0], s[1], s[3], s[6]]]
139
139
140 def warn(files, msg):
140 def warn(files, msg):
141 for f in files:
141 for f in files:
142 ui.warn(msg % m.rel(f))
142 ui.warn(msg % m.rel(f))
143 return int(len(files) > 0)
143 return int(len(files) > 0)
144
144
145 result = 0
145 result = 0
146
146
147 if after:
147 if after:
148 remove, forget = deleted, []
148 remove, forget = deleted, []
149 result = warn(modified + added + clean,
149 result = warn(modified + added + clean,
150 _('not removing %s: file still exists\n'))
150 _('not removing %s: file still exists\n'))
151 else:
151 else:
152 remove, forget = deleted + clean, []
152 remove, forget = deleted + clean, []
153 result = warn(modified, _('not removing %s: file is modified (use -f'
153 result = warn(modified, _('not removing %s: file is modified (use -f'
154 ' to force removal)\n'))
154 ' to force removal)\n'))
155 result = warn(added, _('not removing %s: file has been marked for add'
155 result = warn(added, _('not removing %s: file has been marked for add'
156 ' (use forget to undo)\n')) or result
156 ' (use forget to undo)\n')) or result
157
157
158 for f in sorted(remove + forget):
158 for f in sorted(remove + forget):
159 if ui.verbose or not m.exact(f):
159 if ui.verbose or not m.exact(f):
160 ui.status(_('removing %s\n') % m.rel(f))
160 ui.status(_('removing %s\n') % m.rel(f))
161
161
162 # Need to lock because standin files are deleted then removed from the
162 # Need to lock because standin files are deleted then removed from the
163 # repository and we could race in-between.
163 # repository and we could race in-between.
164 wlock = repo.wlock()
164 wlock = repo.wlock()
165 try:
165 try:
166 lfdirstate = lfutil.openlfdirstate(ui, repo)
166 lfdirstate = lfutil.openlfdirstate(ui, repo)
167 for f in remove:
167 for f in remove:
168 if not after:
168 if not after:
169 # If this is being called by addremove, notify the user that we
169 # If this is being called by addremove, notify the user that we
170 # are removing the file.
170 # are removing the file.
171 if getattr(repo, "_isaddremove", False):
171 if getattr(repo, "_isaddremove", False):
172 ui.status(_('removing %s\n') % f)
172 ui.status(_('removing %s\n') % f)
173 if os.path.exists(repo.wjoin(f)):
173 if os.path.exists(repo.wjoin(f)):
174 util.unlinkpath(repo.wjoin(f))
174 util.unlinkpath(repo.wjoin(f))
175 lfdirstate.remove(f)
175 lfdirstate.remove(f)
176 lfdirstate.write()
176 lfdirstate.write()
177 forget = [lfutil.standin(f) for f in forget]
177 forget = [lfutil.standin(f) for f in forget]
178 remove = [lfutil.standin(f) for f in remove]
178 remove = [lfutil.standin(f) for f in remove]
179 lfutil.repoforget(repo, forget)
179 repo[None].forget(forget)
180 # If this is being called by addremove, let the original addremove
180 # If this is being called by addremove, let the original addremove
181 # function handle this.
181 # function handle this.
182 if not getattr(repo, "_isaddremove", False):
182 if not getattr(repo, "_isaddremove", False):
183 for f in remove:
183 for f in remove:
184 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
184 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
185 repo[None].forget(remove)
185 repo[None].forget(remove)
186 finally:
186 finally:
187 wlock.release()
187 wlock.release()
188
188
189 return result
189 return result
190
190
191 # For overriding mercurial.hgweb.webcommands so that largefiles will
191 # For overriding mercurial.hgweb.webcommands so that largefiles will
192 # appear at their right place in the manifests.
192 # appear at their right place in the manifests.
193 def decodepath(orig, path):
193 def decodepath(orig, path):
194 return lfutil.splitstandin(path) or path
194 return lfutil.splitstandin(path) or path
195
195
196 # -- Wrappers: modify existing commands --------------------------------
196 # -- Wrappers: modify existing commands --------------------------------
197
197
198 # Add works by going through the files that the user wanted to add and
198 # Add works by going through the files that the user wanted to add and
199 # checking if they should be added as largefiles. Then it makes a new
199 # checking if they should be added as largefiles. Then it makes a new
200 # matcher which matches only the normal files and runs the original
200 # matcher which matches only the normal files and runs the original
201 # version of add.
201 # version of add.
202 def overrideadd(orig, ui, repo, *pats, **opts):
202 def overrideadd(orig, ui, repo, *pats, **opts):
203 normal = opts.pop('normal')
203 normal = opts.pop('normal')
204 if normal:
204 if normal:
205 if opts.get('large'):
205 if opts.get('large'):
206 raise util.Abort(_('--normal cannot be used with --large'))
206 raise util.Abort(_('--normal cannot be used with --large'))
207 return orig(ui, repo, *pats, **opts)
207 return orig(ui, repo, *pats, **opts)
208 bad = addlargefiles(ui, repo, *pats, **opts)
208 bad = addlargefiles(ui, repo, *pats, **opts)
209 installnormalfilesmatchfn(repo[None].manifest())
209 installnormalfilesmatchfn(repo[None].manifest())
210 result = orig(ui, repo, *pats, **opts)
210 result = orig(ui, repo, *pats, **opts)
211 restorematchfn()
211 restorematchfn()
212
212
213 return (result == 1 or bad) and 1 or 0
213 return (result == 1 or bad) and 1 or 0
214
214
215 def overrideremove(orig, ui, repo, *pats, **opts):
215 def overrideremove(orig, ui, repo, *pats, **opts):
216 installnormalfilesmatchfn(repo[None].manifest())
216 installnormalfilesmatchfn(repo[None].manifest())
217 result = orig(ui, repo, *pats, **opts)
217 result = orig(ui, repo, *pats, **opts)
218 restorematchfn()
218 restorematchfn()
219 return removelargefiles(ui, repo, *pats, **opts) or result
219 return removelargefiles(ui, repo, *pats, **opts) or result
220
220
221 def overridestatusfn(orig, repo, rev2, **opts):
221 def overridestatusfn(orig, repo, rev2, **opts):
222 try:
222 try:
223 repo._repo.lfstatus = True
223 repo._repo.lfstatus = True
224 return orig(repo, rev2, **opts)
224 return orig(repo, rev2, **opts)
225 finally:
225 finally:
226 repo._repo.lfstatus = False
226 repo._repo.lfstatus = False
227
227
228 def overridestatus(orig, ui, repo, *pats, **opts):
228 def overridestatus(orig, ui, repo, *pats, **opts):
229 try:
229 try:
230 repo.lfstatus = True
230 repo.lfstatus = True
231 return orig(ui, repo, *pats, **opts)
231 return orig(ui, repo, *pats, **opts)
232 finally:
232 finally:
233 repo.lfstatus = False
233 repo.lfstatus = False
234
234
235 def overridedirty(orig, repo, ignoreupdate=False):
235 def overridedirty(orig, repo, ignoreupdate=False):
236 try:
236 try:
237 repo._repo.lfstatus = True
237 repo._repo.lfstatus = True
238 return orig(repo, ignoreupdate)
238 return orig(repo, ignoreupdate)
239 finally:
239 finally:
240 repo._repo.lfstatus = False
240 repo._repo.lfstatus = False
241
241
242 def overridelog(orig, ui, repo, *pats, **opts):
242 def overridelog(orig, ui, repo, *pats, **opts):
243 try:
243 try:
244 repo.lfstatus = True
244 repo.lfstatus = True
245 return orig(ui, repo, *pats, **opts)
245 return orig(ui, repo, *pats, **opts)
246 finally:
246 finally:
247 repo.lfstatus = False
247 repo.lfstatus = False
248
248
249 def overrideverify(orig, ui, repo, *pats, **opts):
249 def overrideverify(orig, ui, repo, *pats, **opts):
250 large = opts.pop('large', False)
250 large = opts.pop('large', False)
251 all = opts.pop('lfa', False)
251 all = opts.pop('lfa', False)
252 contents = opts.pop('lfc', False)
252 contents = opts.pop('lfc', False)
253
253
254 result = orig(ui, repo, *pats, **opts)
254 result = orig(ui, repo, *pats, **opts)
255 if large:
255 if large:
256 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
256 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
257 return result
257 return result
258
258
259 def overridedebugstate(orig, ui, repo, *pats, **opts):
259 def overridedebugstate(orig, ui, repo, *pats, **opts):
260 large = opts.pop('large', False)
260 large = opts.pop('large', False)
261 if large:
261 if large:
262 lfcommands.debugdirstate(ui, repo)
262 lfcommands.debugdirstate(ui, repo)
263 else:
263 else:
264 orig(ui, repo, *pats, **opts)
264 orig(ui, repo, *pats, **opts)
265
265
266 # Override needs to refresh standins so that update's normal merge
266 # Override needs to refresh standins so that update's normal merge
267 # will go through properly. Then the other update hook (overriding repo.update)
267 # will go through properly. Then the other update hook (overriding repo.update)
268 # will get the new files. Filemerge is also overridden so that the merge
268 # will get the new files. Filemerge is also overridden so that the merge
269 # will merge standins correctly.
269 # will merge standins correctly.
270 def overrideupdate(orig, ui, repo, *pats, **opts):
270 def overrideupdate(orig, ui, repo, *pats, **opts):
271 lfdirstate = lfutil.openlfdirstate(ui, repo)
271 lfdirstate = lfutil.openlfdirstate(ui, repo)
272 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
272 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
273 False, False)
273 False, False)
274 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
274 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
275
275
276 # Need to lock between the standins getting updated and their
276 # Need to lock between the standins getting updated and their
277 # largefiles getting updated
277 # largefiles getting updated
278 wlock = repo.wlock()
278 wlock = repo.wlock()
279 try:
279 try:
280 if opts['check']:
280 if opts['check']:
281 mod = len(modified) > 0
281 mod = len(modified) > 0
282 for lfile in unsure:
282 for lfile in unsure:
283 standin = lfutil.standin(lfile)
283 standin = lfutil.standin(lfile)
284 if repo['.'][standin].data().strip() != \
284 if repo['.'][standin].data().strip() != \
285 lfutil.hashfile(repo.wjoin(lfile)):
285 lfutil.hashfile(repo.wjoin(lfile)):
286 mod = True
286 mod = True
287 else:
287 else:
288 lfdirstate.normal(lfile)
288 lfdirstate.normal(lfile)
289 lfdirstate.write()
289 lfdirstate.write()
290 if mod:
290 if mod:
291 raise util.Abort(_('uncommitted local changes'))
291 raise util.Abort(_('uncommitted local changes'))
292 # XXX handle removed differently
292 # XXX handle removed differently
293 if not opts['clean']:
293 if not opts['clean']:
294 for lfile in unsure + modified + added:
294 for lfile in unsure + modified + added:
295 lfutil.updatestandin(repo, lfutil.standin(lfile))
295 lfutil.updatestandin(repo, lfutil.standin(lfile))
296 finally:
296 finally:
297 wlock.release()
297 wlock.release()
298 return orig(ui, repo, *pats, **opts)
298 return orig(ui, repo, *pats, **opts)
299
299
300 # Before starting the manifest merge, merge.updates will call
300 # Before starting the manifest merge, merge.updates will call
301 # _checkunknown to check if there are any files in the merged-in
301 # _checkunknown to check if there are any files in the merged-in
302 # changeset that collide with unknown files in the working copy.
302 # changeset that collide with unknown files in the working copy.
303 #
303 #
304 # The largefiles are seen as unknown, so this prevents us from merging
304 # The largefiles are seen as unknown, so this prevents us from merging
305 # in a file 'foo' if we already have a largefile with the same name.
305 # in a file 'foo' if we already have a largefile with the same name.
306 #
306 #
307 # The overridden function filters the unknown files by removing any
307 # The overridden function filters the unknown files by removing any
308 # largefiles. This makes the merge proceed and we can then handle this
308 # largefiles. This makes the merge proceed and we can then handle this
309 # case further in the overridden manifestmerge function below.
309 # case further in the overridden manifestmerge function below.
310 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
310 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
311 if lfutil.standin(f) in wctx:
311 if lfutil.standin(f) in wctx:
312 return False
312 return False
313 return origfn(repo, wctx, mctx, f)
313 return origfn(repo, wctx, mctx, f)
314
314
315 # The manifest merge handles conflicts on the manifest level. We want
315 # The manifest merge handles conflicts on the manifest level. We want
316 # to handle changes in largefile-ness of files at this level too.
316 # to handle changes in largefile-ness of files at this level too.
317 #
317 #
318 # The strategy is to run the original manifestmerge and then process
318 # The strategy is to run the original manifestmerge and then process
319 # the action list it outputs. There are two cases we need to deal with:
319 # the action list it outputs. There are two cases we need to deal with:
320 #
320 #
321 # 1. Normal file in p1, largefile in p2. Here the largefile is
321 # 1. Normal file in p1, largefile in p2. Here the largefile is
322 # detected via its standin file, which will enter the working copy
322 # detected via its standin file, which will enter the working copy
323 # with a "get" action. It is not "merge" since the standin is all
323 # with a "get" action. It is not "merge" since the standin is all
324 # Mercurial is concerned with at this level -- the link to the
324 # Mercurial is concerned with at this level -- the link to the
325 # existing normal file is not relevant here.
325 # existing normal file is not relevant here.
326 #
326 #
327 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
327 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
328 # since the largefile will be present in the working copy and
328 # since the largefile will be present in the working copy and
329 # different from the normal file in p2. Mercurial therefore
329 # different from the normal file in p2. Mercurial therefore
330 # triggers a merge action.
330 # triggers a merge action.
331 #
331 #
332 # In both cases, we prompt the user and emit new actions to either
332 # In both cases, we prompt the user and emit new actions to either
333 # remove the standin (if the normal file was kept) or to remove the
333 # remove the standin (if the normal file was kept) or to remove the
334 # normal file and get the standin (if the largefile was kept). The
334 # normal file and get the standin (if the largefile was kept). The
335 # default prompt answer is to use the largefile version since it was
335 # default prompt answer is to use the largefile version since it was
336 # presumably changed on purpose.
336 # presumably changed on purpose.
337 #
337 #
338 # Finally, the merge.applyupdates function will then take care of
338 # Finally, the merge.applyupdates function will then take care of
339 # writing the files into the working copy and lfcommands.updatelfiles
339 # writing the files into the working copy and lfcommands.updatelfiles
340 # will update the largefiles.
340 # will update the largefiles.
341 def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
341 def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
342 actions = origfn(repo, p1, p2, pa, overwrite, partial)
342 actions = origfn(repo, p1, p2, pa, overwrite, partial)
343 processed = []
343 processed = []
344
344
345 for action in actions:
345 for action in actions:
346 if overwrite:
346 if overwrite:
347 processed.append(action)
347 processed.append(action)
348 continue
348 continue
349 f, m = action[:2]
349 f, m = action[:2]
350
350
351 choices = (_('&Largefile'), _('&Normal file'))
351 choices = (_('&Largefile'), _('&Normal file'))
352 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
352 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
353 # Case 1: normal file in the working copy, largefile in
353 # Case 1: normal file in the working copy, largefile in
354 # the second parent
354 # the second parent
355 lfile = lfutil.splitstandin(f)
355 lfile = lfutil.splitstandin(f)
356 standin = f
356 standin = f
357 msg = _('%s has been turned into a largefile\n'
357 msg = _('%s has been turned into a largefile\n'
358 'use (l)argefile or keep as (n)ormal file?') % lfile
358 'use (l)argefile or keep as (n)ormal file?') % lfile
359 if repo.ui.promptchoice(msg, choices, 0) == 0:
359 if repo.ui.promptchoice(msg, choices, 0) == 0:
360 processed.append((lfile, "r"))
360 processed.append((lfile, "r"))
361 processed.append((standin, "g", p2.flags(standin)))
361 processed.append((standin, "g", p2.flags(standin)))
362 else:
362 else:
363 processed.append((standin, "r"))
363 processed.append((standin, "r"))
364 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
364 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
365 # Case 2: largefile in the working copy, normal file in
365 # Case 2: largefile in the working copy, normal file in
366 # the second parent
366 # the second parent
367 standin = lfutil.standin(f)
367 standin = lfutil.standin(f)
368 lfile = f
368 lfile = f
369 msg = _('%s has been turned into a normal file\n'
369 msg = _('%s has been turned into a normal file\n'
370 'keep as (l)argefile or use (n)ormal file?') % lfile
370 'keep as (l)argefile or use (n)ormal file?') % lfile
371 if repo.ui.promptchoice(msg, choices, 0) == 0:
371 if repo.ui.promptchoice(msg, choices, 0) == 0:
372 processed.append((lfile, "r"))
372 processed.append((lfile, "r"))
373 else:
373 else:
374 processed.append((standin, "r"))
374 processed.append((standin, "r"))
375 processed.append((lfile, "g", p2.flags(lfile)))
375 processed.append((lfile, "g", p2.flags(lfile)))
376 else:
376 else:
377 processed.append(action)
377 processed.append(action)
378
378
379 return processed
379 return processed
380
380
381 # Override filemerge to prompt the user about how they wish to merge
381 # Override filemerge to prompt the user about how they wish to merge
382 # largefiles. This will handle identical edits, and copy/rename +
382 # largefiles. This will handle identical edits, and copy/rename +
383 # edit without prompting the user.
383 # edit without prompting the user.
384 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
384 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
385 # Use better variable names here. Because this is a wrapper we cannot
385 # Use better variable names here. Because this is a wrapper we cannot
386 # change the variable names in the function declaration.
386 # change the variable names in the function declaration.
387 fcdest, fcother, fcancestor = fcd, fco, fca
387 fcdest, fcother, fcancestor = fcd, fco, fca
388 if not lfutil.isstandin(orig):
388 if not lfutil.isstandin(orig):
389 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
389 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
390 else:
390 else:
391 if not fcother.cmp(fcdest): # files identical?
391 if not fcother.cmp(fcdest): # files identical?
392 return None
392 return None
393
393
394 # backwards, use working dir parent as ancestor
394 # backwards, use working dir parent as ancestor
395 if fcancestor == fcother:
395 if fcancestor == fcother:
396 fcancestor = fcdest.parents()[0]
396 fcancestor = fcdest.parents()[0]
397
397
398 if orig != fcother.path():
398 if orig != fcother.path():
399 repo.ui.status(_('merging %s and %s to %s\n')
399 repo.ui.status(_('merging %s and %s to %s\n')
400 % (lfutil.splitstandin(orig),
400 % (lfutil.splitstandin(orig),
401 lfutil.splitstandin(fcother.path()),
401 lfutil.splitstandin(fcother.path()),
402 lfutil.splitstandin(fcdest.path())))
402 lfutil.splitstandin(fcdest.path())))
403 else:
403 else:
404 repo.ui.status(_('merging %s\n')
404 repo.ui.status(_('merging %s\n')
405 % lfutil.splitstandin(fcdest.path()))
405 % lfutil.splitstandin(fcdest.path()))
406
406
407 if fcancestor.path() != fcother.path() and fcother.data() == \
407 if fcancestor.path() != fcother.path() and fcother.data() == \
408 fcancestor.data():
408 fcancestor.data():
409 return 0
409 return 0
410 if fcancestor.path() != fcdest.path() and fcdest.data() == \
410 if fcancestor.path() != fcdest.path() and fcdest.data() == \
411 fcancestor.data():
411 fcancestor.data():
412 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
412 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
413 return 0
413 return 0
414
414
415 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
415 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
416 'keep (l)ocal or take (o)ther?') %
416 'keep (l)ocal or take (o)ther?') %
417 lfutil.splitstandin(orig),
417 lfutil.splitstandin(orig),
418 (_('&Local'), _('&Other')), 0) == 0:
418 (_('&Local'), _('&Other')), 0) == 0:
419 return 0
419 return 0
420 else:
420 else:
421 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
421 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
422 return 0
422 return 0
423
423
424 # Copy first changes the matchers to match standins instead of
424 # Copy first changes the matchers to match standins instead of
425 # largefiles. Then it overrides util.copyfile in that function it
425 # largefiles. Then it overrides util.copyfile in that function it
426 # checks if the destination largefile already exists. It also keeps a
426 # checks if the destination largefile already exists. It also keeps a
427 # list of copied files so that the largefiles can be copied and the
427 # list of copied files so that the largefiles can be copied and the
428 # dirstate updated.
428 # dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap the copy/rename command so largefiles and their standins
    stay in sync.

    Runs the original command twice: first with a matcher restricted to
    normal files, then with the patterns rewritten to point at the
    standin directory. While the second pass runs, util.copyfile is
    temporarily replaced so that every copied standin is recorded (and
    so an existing destination largefile is refused unless --force);
    afterwards the corresponding largefiles are copied or renamed and
    the largefiles dirstate is updated.

    Raises util.Abort('no files to copy') only when *both* passes found
    nothing to copy.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # Map a working-dir-relative path to its absolute standin path.
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        # Make sure the standin mirror of a directory destination exists
        # before the copy into it happens.
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # 'no files to copy' here just means no *normal* files;
            # remember that and keep going for the largefiles pass.
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                # Build a matcher that hits only standins of tracked
                # largefiles, reusing the real matcher for the rest.
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                # Literal paths are redirected at their standins; glob-style
                # patterns are left for the matcher above to handle.
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    # Record each standin copy so the matching largefile
                    # can be copied below; refuse to clobber an existing
                    # largefile unless --force was given.
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # Translate the standin paths back to largefile paths
                    # and replay the copy/rename on the largefiles.
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
557
557
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def overriderevert(orig, ui, repo, *pats, **opts):
    """Wrap the revert command so reverting largefiles works.

    See the comment above for the overall strategy; the set of reverted
    largefiles is accumulated on repo._lfilestoupdate by the matcher
    installed below.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        for lfile in modified:
            # Refresh standins so revert sees the real largefile state.
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            # A missing largefile should not leave a stale standin behind.
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = scmutil.revsingle(repo, opts.get('rev'))
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                # Rewrite the matcher so revert operates on standins
                # instead of the largefiles themselves.
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                origmatchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if origmatchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return origmatchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(overridematch)
            # NOTE(review): bare attribute access, presumably to force
            # demand-loading of scmutil.match before it is wrapped --
            # confirm before removing.
            scmutil.match
            matches = overridematch(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        for lfile in modified:
            if lfile in lfileslist:
                # Restore the standin of a reverted, still-tracked
                # largefile to the content of the parent revision.
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for lfile in added:
            # Reverting an add: drop the dirstate entry and delete the
            # standin that the revert target does not contain.
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
649
649
def hgupdate(orig, repo, node):
    """Wrap hg.update so the working copy's largefiles are refreshed
    after the update.

    Only largefiles whose standins actually changed across the update
    are refreshed, to save time.
    """
    standinsbefore = lfutil.getstandinsstate(repo)
    result = orig(repo, node)
    standinsafter = lfutil.getstandinsstate(repo)
    changed = lfutil.getlfilestoupdate(standinsbefore, standinsafter)
    lfcommands.updatelfiles(repo.ui, repo, filelist=changed,
                            printmessage=True)
    return result
658
658
def hgclean(orig, repo, node, show_stats=True):
    """Wrap hg.clean: after the clean update, sync largefiles in the
    working copy with their standins."""
    ret = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return ret
663
663
def hgmerge(orig, repo, node, force=None, remind=True):
    """Wrap hg.merge and refresh largefiles afterwards.

    While the merge (and the subsequent updatelfiles call) runs,
    repo._ismerging is set so that updatelfiles() knows to trust the
    standins in the working copy rather than those in the current node.
    """
    repo._ismerging = True
    try:
        ret = orig(repo, node, force, remind)
        lfcommands.updatelfiles(repo.ui, repo)
    finally:
        # Always clear the flag, even if the merge aborts.
        repo._ismerging = False
    return ret
675
675
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap the pull command for largefiles handling.

    With --rebase, performs the pull and rebase itself (suppressing
    commands.postincoming) while repo._isrebasing is set. Otherwise runs
    the original pull and then caches the largefiles referenced by any
    newly pulled heads, so a later merge or rebase with one of those
    heads does not fail on missing largefiles. With --all-largefiles,
    additionally downloads largefiles for every newly pulled revision.
    """
    revsprepull = len(repo)
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # Suppress postincoming so pull does not update the working
            # copy before the rebase runs.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            if not source:
                source = 'default'
            repo.lfpullsource = source
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # Something was actually pulled; rebase onto it.
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        if not source:
            source = 'default'
        repo.lfpullsource = source
        oldheads = lfutil.getcurrentheads(repo)
        result = orig(ui, repo, source, **opts)
        # If we do not have the new largefiles for any new heads we pulled, we
        # will run into a problem later if we try to merge or rebase with one of
        # these heads, so cache the largefiles now directly into the system
        # cache.
        ui.status(_("caching new largefiles\n"))
        numcached = 0
        heads = lfutil.getcurrentheads(repo)
        newheads = set(heads).difference(set(oldheads))
        for head in newheads:
            (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
            numcached += len(cached)
        ui.status(_("%d largefiles cached\n") % numcached)
    if opts.get('all_largefiles'):
        revspostpull = len(repo)
        revs = []
        for rev in xrange(revsprepull + 1, revspostpull):
            revs.append(repo[rev].rev())
        lfcommands.downloadlfiles(ui, repo, revs)
    return result
731
731
def overrideclone(orig, ui, source, dest=None, **opts):
    """Wrap the clone command to reject --all-largefiles for non-local
    destinations, since largefiles cannot be cached into a remote repo.

    The effective destination defaults to hg.defaultdest(source), the
    same directory the real clone will use.
    """
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        # Keep the untranslated format string inside _() and apply the
        # '%' substitution afterwards; formatting first would make the
        # message impossible to find in the translation catalog.
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s')
            % d)

    return orig(ui, source, dest, **opts)
742
742
def hgclone(orig, ui, opts, *args, **kwargs):
    """Wrap hg.clone to set up largefiles support in the destination.

    For --noupdate clones of largefiles repos this pre-creates the
    .hglf and .hg/largefiles directories. With --all-largefiles it
    downloads every largefile into the destination; if any are missing
    it returns None instead of the (sourcerepo, destrepo) pair.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # The .hglf directory must exist for the standin matcher to match
        # anything (which listlfiles uses for each rev), and .hg/largefiles is
        # assumed to exist by the code that caches the downloaded file. These
        # directories exist if clone updated to any rev. (If the repo does not
        # have largefiles, download never gets to the point of needing
        # .hg/largefiles, and the standin matcher won't match anything anyway.)
        if 'largefiles' in repo.requirements:
            if opts.get('noupdate'):
                util.makedirs(repo.wjoin(lfutil.shortname))
                util.makedirs(repo.join(lfutil.longname))

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result
771
771
def overriderebase(orig, ui, repo, **opts):
    """Run rebase with repo._isrebasing set, so other wrapped code can
    tell a rebase is in progress."""
    repo._isrebasing = True
    try:
        result = orig(ui, repo, **opts)
    finally:
        # Clear the flag even when the rebase raises.
        repo._isrebasing = False
    return result
778
778
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    """Archive a revision, materializing largefiles from the repo store
    or system cache in place of their standins.

    Mirrors the signature of archival.archive(); raises util.Abort for
    an unknown archive kind, for a prefix with kind 'files', or when a
    referenced largefile cannot be found in any cache.
    """
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Emit one archive member, honoring the caller's matcher and
        # applying working-dir decode filters when requested.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # Build the .hg_archival.txt payload: repo/node/branch plus
            # global tags, or latesttag/latesttagdistance for an
            # untagged revision.
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Substitute the real largefile content for the standin.
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                # Read the cached largefile lazily, only if the matcher
                # accepts it.
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()
857
857
858 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
858 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
859 repo._get(repo._state + ('hg',))
859 repo._get(repo._state + ('hg',))
860 rev = repo._state[1]
860 rev = repo._state[1]
861 ctx = repo._repo[rev]
861 ctx = repo._repo[rev]
862
862
863 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
863 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
864
864
865 def write(name, mode, islink, getdata):
865 def write(name, mode, islink, getdata):
866 # At this point, the standin has been replaced with the largefile name,
866 # At this point, the standin has been replaced with the largefile name,
867 # so the normal matcher works here without the lfutil variants.
867 # so the normal matcher works here without the lfutil variants.
868 if match and not match(f):
868 if match and not match(f):
869 return
869 return
870 data = getdata()
870 data = getdata()
871
871
872 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
872 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
873
873
874 for f in ctx:
874 for f in ctx:
875 ff = ctx.flags(f)
875 ff = ctx.flags(f)
876 getdata = ctx[f].data
876 getdata = ctx[f].data
877 if lfutil.isstandin(f):
877 if lfutil.isstandin(f):
878 path = lfutil.findfile(repo._repo, getdata().strip())
878 path = lfutil.findfile(repo._repo, getdata().strip())
879 if path is None:
879 if path is None:
880 raise util.Abort(
880 raise util.Abort(
881 _('largefile %s not found in repo store or system cache')
881 _('largefile %s not found in repo store or system cache')
882 % lfutil.splitstandin(f))
882 % lfutil.splitstandin(f))
883 f = lfutil.splitstandin(f)
883 f = lfutil.splitstandin(f)
884
884
885 def getdatafn():
885 def getdatafn():
886 fd = None
886 fd = None
887 try:
887 try:
888 fd = open(os.path.join(prefix, path), 'rb')
888 fd = open(os.path.join(prefix, path), 'rb')
889 return fd.read()
889 return fd.read()
890 finally:
890 finally:
891 if fd:
891 if fd:
892 fd.close()
892 fd.close()
893
893
894 getdata = getdatafn
894 getdata = getdatafn
895
895
896 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
896 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
897
897
898 for subpath in ctx.substate:
898 for subpath in ctx.substate:
899 sub = ctx.sub(subpath)
899 sub = ctx.sub(subpath)
900 submatch = match_.narrowmatcher(subpath, match)
900 submatch = match_.narrowmatcher(subpath, match)
901 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
901 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
902 submatch)
902 submatch)
903
903
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    """Abort if the repo has uncommitted changes, including largefile
    changes that are not yet reflected in their standins.

    Raises util.Abort when anything is modified/added/removed/deleted.
    """
    orig(repo)
    repo.lfstatus = True
    try:
        modified, added, removed, deleted = repo.status()[:4]
    finally:
        # Restore the flag even if status() raises, so a failed check
        # does not leave largefile-status mode permanently enabled.
        repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
915
915
# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    """Wrap the fetch command to abort on uncommitted changes,
    including largefile changes not yet reflected in standins.

    Raises util.Abort when anything is modified/added/removed/deleted;
    otherwise delegates to the original fetch.
    """
    repo.lfstatus = True
    try:
        modified, added, removed, deleted = repo.status()[:4]
    finally:
        # Restore the flag even if status() raises, so a failed check
        # does not leave largefile-status mode permanently enabled.
        repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
924
924
925 def overrideforget(orig, ui, repo, *pats, **opts):
925 def overrideforget(orig, ui, repo, *pats, **opts):
926 installnormalfilesmatchfn(repo[None].manifest())
926 installnormalfilesmatchfn(repo[None].manifest())
927 result = orig(ui, repo, *pats, **opts)
927 result = orig(ui, repo, *pats, **opts)
928 restorematchfn()
928 restorematchfn()
929 m = scmutil.match(repo[None], pats, opts)
929 m = scmutil.match(repo[None], pats, opts)
930
930
931 try:
931 try:
932 repo.lfstatus = True
932 repo.lfstatus = True
933 s = repo.status(match=m, clean=True)
933 s = repo.status(match=m, clean=True)
934 finally:
934 finally:
935 repo.lfstatus = False
935 repo.lfstatus = False
936 forget = sorted(s[0] + s[1] + s[3] + s[6])
936 forget = sorted(s[0] + s[1] + s[3] + s[6])
937 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
937 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
938
938
939 for f in forget:
939 for f in forget:
940 if lfutil.standin(f) not in repo.dirstate and not \
940 if lfutil.standin(f) not in repo.dirstate and not \
941 os.path.isdir(m.rel(lfutil.standin(f))):
941 os.path.isdir(m.rel(lfutil.standin(f))):
942 ui.warn(_('not removing %s: file is already untracked\n')
942 ui.warn(_('not removing %s: file is already untracked\n')
943 % m.rel(f))
943 % m.rel(f))
944 result = 1
944 result = 1
945
945
946 for f in forget:
946 for f in forget:
947 if ui.verbose or not m.exact(f):
947 if ui.verbose or not m.exact(f):
948 ui.status(_('removing %s\n') % m.rel(f))
948 ui.status(_('removing %s\n') % m.rel(f))
949
949
950 # Need to lock because standin files are deleted then removed from the
950 # Need to lock because standin files are deleted then removed from the
951 # repository and we could race in-between.
951 # repository and we could race in-between.
952 wlock = repo.wlock()
952 wlock = repo.wlock()
953 try:
953 try:
954 lfdirstate = lfutil.openlfdirstate(ui, repo)
954 lfdirstate = lfutil.openlfdirstate(ui, repo)
955 for f in forget:
955 for f in forget:
956 if lfdirstate[f] == 'a':
956 if lfdirstate[f] == 'a':
957 lfdirstate.drop(f)
957 lfdirstate.drop(f)
958 else:
958 else:
959 lfdirstate.remove(f)
959 lfdirstate.remove(f)
960 lfdirstate.write()
960 lfdirstate.write()
961 standins = [lfutil.standin(f) for f in forget]
961 standins = [lfutil.standin(f) for f in forget]
962 for f in standins:
962 for f in standins:
963 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
963 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
964 repo[None].forget(standins)
964 repo[None].forget(standins)
965 finally:
965 finally:
966 wlock.release()
966 wlock.release()
967
967
968 return result
968 return result
969
969
970 def getoutgoinglfiles(ui, repo, dest=None, **opts):
970 def getoutgoinglfiles(ui, repo, dest=None, **opts):
971 dest = ui.expandpath(dest or 'default-push', dest or 'default')
971 dest = ui.expandpath(dest or 'default-push', dest or 'default')
972 dest, branches = hg.parseurl(dest, opts.get('branch'))
972 dest, branches = hg.parseurl(dest, opts.get('branch'))
973 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
973 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
974 if revs:
974 if revs:
975 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
975 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
976
976
977 try:
977 try:
978 remote = hg.peer(repo, opts, dest)
978 remote = hg.peer(repo, opts, dest)
979 except error.RepoError:
979 except error.RepoError:
980 return None
980 return None
981 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
981 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
982 if not outgoing.missing:
982 if not outgoing.missing:
983 return outgoing.missing
983 return outgoing.missing
984 o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
984 o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
985 if opts.get('newest_first'):
985 if opts.get('newest_first'):
986 o.reverse()
986 o.reverse()
987
987
988 toupload = set()
988 toupload = set()
989 for n in o:
989 for n in o:
990 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
990 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
991 ctx = repo[n]
991 ctx = repo[n]
992 files = set(ctx.files())
992 files = set(ctx.files())
993 if len(parents) == 2:
993 if len(parents) == 2:
994 mc = ctx.manifest()
994 mc = ctx.manifest()
995 mp1 = ctx.parents()[0].manifest()
995 mp1 = ctx.parents()[0].manifest()
996 mp2 = ctx.parents()[1].manifest()
996 mp2 = ctx.parents()[1].manifest()
997 for f in mp1:
997 for f in mp1:
998 if f not in mc:
998 if f not in mc:
999 files.add(f)
999 files.add(f)
1000 for f in mp2:
1000 for f in mp2:
1001 if f not in mc:
1001 if f not in mc:
1002 files.add(f)
1002 files.add(f)
1003 for f in mc:
1003 for f in mc:
1004 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
1004 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
1005 files.add(f)
1005 files.add(f)
1006 toupload = toupload.union(
1006 toupload = toupload.union(
1007 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
1007 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
1008 return toupload
1008 return toupload
1009
1009
1010 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
1010 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
1011 result = orig(ui, repo, dest, **opts)
1011 result = orig(ui, repo, dest, **opts)
1012
1012
1013 if opts.pop('large', None):
1013 if opts.pop('large', None):
1014 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
1014 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
1015 if toupload is None:
1015 if toupload is None:
1016 ui.status(_('largefiles: No remote repo\n'))
1016 ui.status(_('largefiles: No remote repo\n'))
1017 elif not toupload:
1017 elif not toupload:
1018 ui.status(_('largefiles: no files to upload\n'))
1018 ui.status(_('largefiles: no files to upload\n'))
1019 else:
1019 else:
1020 ui.status(_('largefiles to upload:\n'))
1020 ui.status(_('largefiles to upload:\n'))
1021 for file in toupload:
1021 for file in toupload:
1022 ui.status(lfutil.splitstandin(file) + '\n')
1022 ui.status(lfutil.splitstandin(file) + '\n')
1023 ui.status('\n')
1023 ui.status('\n')
1024
1024
1025 return result
1025 return result
1026
1026
1027 def overridesummary(orig, ui, repo, *pats, **opts):
1027 def overridesummary(orig, ui, repo, *pats, **opts):
1028 try:
1028 try:
1029 repo.lfstatus = True
1029 repo.lfstatus = True
1030 orig(ui, repo, *pats, **opts)
1030 orig(ui, repo, *pats, **opts)
1031 finally:
1031 finally:
1032 repo.lfstatus = False
1032 repo.lfstatus = False
1033
1033
1034 if opts.pop('large', None):
1034 if opts.pop('large', None):
1035 toupload = getoutgoinglfiles(ui, repo, None, **opts)
1035 toupload = getoutgoinglfiles(ui, repo, None, **opts)
1036 if toupload is None:
1036 if toupload is None:
1037 # i18n: column positioning for "hg summary"
1037 # i18n: column positioning for "hg summary"
1038 ui.status(_('largefiles: (no remote repo)\n'))
1038 ui.status(_('largefiles: (no remote repo)\n'))
1039 elif not toupload:
1039 elif not toupload:
1040 # i18n: column positioning for "hg summary"
1040 # i18n: column positioning for "hg summary"
1041 ui.status(_('largefiles: (no files to upload)\n'))
1041 ui.status(_('largefiles: (no files to upload)\n'))
1042 else:
1042 else:
1043 # i18n: column positioning for "hg summary"
1043 # i18n: column positioning for "hg summary"
1044 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1044 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1045
1045
1046 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1046 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1047 similarity=None):
1047 similarity=None):
1048 if not lfutil.islfilesrepo(repo):
1048 if not lfutil.islfilesrepo(repo):
1049 return orig(repo, pats, opts, dry_run, similarity)
1049 return orig(repo, pats, opts, dry_run, similarity)
1050 # Get the list of missing largefiles so we can remove them
1050 # Get the list of missing largefiles so we can remove them
1051 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1051 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1052 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1052 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1053 False, False)
1053 False, False)
1054 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1054 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1055
1055
1056 # Call into the normal remove code, but the removing of the standin, we want
1056 # Call into the normal remove code, but the removing of the standin, we want
1057 # to have handled by original addremove. Monkey patching here makes sure
1057 # to have handled by original addremove. Monkey patching here makes sure
1058 # we don't remove the standin in the largefiles code, preventing a very
1058 # we don't remove the standin in the largefiles code, preventing a very
1059 # confused state later.
1059 # confused state later.
1060 if missing:
1060 if missing:
1061 m = [repo.wjoin(f) for f in missing]
1061 m = [repo.wjoin(f) for f in missing]
1062 repo._isaddremove = True
1062 repo._isaddremove = True
1063 removelargefiles(repo.ui, repo, *m, **opts)
1063 removelargefiles(repo.ui, repo, *m, **opts)
1064 repo._isaddremove = False
1064 repo._isaddremove = False
1065 # Call into the normal add code, and any files that *should* be added as
1065 # Call into the normal add code, and any files that *should* be added as
1066 # largefiles will be
1066 # largefiles will be
1067 addlargefiles(repo.ui, repo, *pats, **opts)
1067 addlargefiles(repo.ui, repo, *pats, **opts)
1068 # Now that we've handled largefiles, hand off to the original addremove
1068 # Now that we've handled largefiles, hand off to the original addremove
1069 # function to take care of the rest. Make sure it doesn't do anything with
1069 # function to take care of the rest. Make sure it doesn't do anything with
1070 # largefiles by installing a matcher that will ignore them.
1070 # largefiles by installing a matcher that will ignore them.
1071 installnormalfilesmatchfn(repo[None].manifest())
1071 installnormalfilesmatchfn(repo[None].manifest())
1072 result = orig(repo, pats, opts, dry_run, similarity)
1072 result = orig(repo, pats, opts, dry_run, similarity)
1073 restorematchfn()
1073 restorematchfn()
1074 return result
1074 return result
1075
1075
1076 # Calling purge with --all will cause the largefiles to be deleted.
1076 # Calling purge with --all will cause the largefiles to be deleted.
1077 # Override repo.status to prevent this from happening.
1077 # Override repo.status to prevent this from happening.
1078 def overridepurge(orig, ui, repo, *dirs, **opts):
1078 def overridepurge(orig, ui, repo, *dirs, **opts):
1079 # XXX large file status is buggy when used on repo proxy.
1079 # XXX large file status is buggy when used on repo proxy.
1080 # XXX this needs to be investigate.
1080 # XXX this needs to be investigate.
1081 repo = repo.unfiltered()
1081 repo = repo.unfiltered()
1082 oldstatus = repo.status
1082 oldstatus = repo.status
1083 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1083 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1084 clean=False, unknown=False, listsubrepos=False):
1084 clean=False, unknown=False, listsubrepos=False):
1085 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1085 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1086 listsubrepos)
1086 listsubrepos)
1087 lfdirstate = lfutil.openlfdirstate(ui, repo)
1087 lfdirstate = lfutil.openlfdirstate(ui, repo)
1088 modified, added, removed, deleted, unknown, ignored, clean = r
1088 modified, added, removed, deleted, unknown, ignored, clean = r
1089 unknown = [f for f in unknown if lfdirstate[f] == '?']
1089 unknown = [f for f in unknown if lfdirstate[f] == '?']
1090 ignored = [f for f in ignored if lfdirstate[f] == '?']
1090 ignored = [f for f in ignored if lfdirstate[f] == '?']
1091 return modified, added, removed, deleted, unknown, ignored, clean
1091 return modified, added, removed, deleted, unknown, ignored, clean
1092 repo.status = overridestatus
1092 repo.status = overridestatus
1093 orig(ui, repo, *dirs, **opts)
1093 orig(ui, repo, *dirs, **opts)
1094 repo.status = oldstatus
1094 repo.status = oldstatus
1095
1095
1096 def overriderollback(orig, ui, repo, **opts):
1096 def overriderollback(orig, ui, repo, **opts):
1097 result = orig(ui, repo, **opts)
1097 result = orig(ui, repo, **opts)
1098 merge.update(repo, node=None, branchmerge=False, force=True,
1098 merge.update(repo, node=None, branchmerge=False, force=True,
1099 partial=lfutil.isstandin)
1099 partial=lfutil.isstandin)
1100 wlock = repo.wlock()
1100 wlock = repo.wlock()
1101 try:
1101 try:
1102 lfdirstate = lfutil.openlfdirstate(ui, repo)
1102 lfdirstate = lfutil.openlfdirstate(ui, repo)
1103 lfiles = lfutil.listlfiles(repo)
1103 lfiles = lfutil.listlfiles(repo)
1104 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1104 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1105 for file in lfiles:
1105 for file in lfiles:
1106 if file in oldlfiles:
1106 if file in oldlfiles:
1107 lfdirstate.normallookup(file)
1107 lfdirstate.normallookup(file)
1108 else:
1108 else:
1109 lfdirstate.add(file)
1109 lfdirstate.add(file)
1110 lfdirstate.write()
1110 lfdirstate.write()
1111 finally:
1111 finally:
1112 wlock.release()
1112 wlock.release()
1113 return result
1113 return result
1114
1114
1115 def overridetransplant(orig, ui, repo, *revs, **opts):
1115 def overridetransplant(orig, ui, repo, *revs, **opts):
1116 try:
1116 try:
1117 oldstandins = lfutil.getstandinsstate(repo)
1117 oldstandins = lfutil.getstandinsstate(repo)
1118 repo._istransplanting = True
1118 repo._istransplanting = True
1119 result = orig(ui, repo, *revs, **opts)
1119 result = orig(ui, repo, *revs, **opts)
1120 newstandins = lfutil.getstandinsstate(repo)
1120 newstandins = lfutil.getstandinsstate(repo)
1121 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1121 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1122 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1122 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1123 printmessage=True)
1123 printmessage=True)
1124 finally:
1124 finally:
1125 repo._istransplanting = False
1125 repo._istransplanting = False
1126 return result
1126 return result
1127
1127
1128 def overridecat(orig, ui, repo, file1, *pats, **opts):
1128 def overridecat(orig, ui, repo, file1, *pats, **opts):
1129 ctx = scmutil.revsingle(repo, opts.get('rev'))
1129 ctx = scmutil.revsingle(repo, opts.get('rev'))
1130 if not lfutil.standin(file1) in ctx:
1130 if not lfutil.standin(file1) in ctx:
1131 result = orig(ui, repo, file1, *pats, **opts)
1131 result = orig(ui, repo, file1, *pats, **opts)
1132 return result
1132 return result
1133 return lfcommands.catlfile(repo, file1, ctx.rev(), opts.get('output'))
1133 return lfcommands.catlfile(repo, file1, ctx.rev(), opts.get('output'))
1134
1134
1135 def mercurialsinkbefore(orig, sink):
1135 def mercurialsinkbefore(orig, sink):
1136 sink.repo._isconverting = True
1136 sink.repo._isconverting = True
1137 orig(sink)
1137 orig(sink)
1138
1138
1139 def mercurialsinkafter(orig, sink):
1139 def mercurialsinkafter(orig, sink):
1140 sink.repo._isconverting = False
1140 sink.repo._isconverting = False
1141 orig(sink)
1141 orig(sink)
@@ -1,504 +1,504
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13
13
14 from mercurial import context, error, manifest, match as match_, util, \
14 from mercurial import context, error, manifest, match as match_, util, \
15 discovery
15 discovery
16 from mercurial import node as node_
16 from mercurial import node as node_
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import localrepo
18 from mercurial import localrepo
19
19
20 import lfcommands
20 import lfcommands
21 import proto
21 import proto
22 import lfutil
22 import lfutil
23
23
24 def reposetup(ui, repo):
24 def reposetup(ui, repo):
25 # wire repositories should be given new wireproto functions but not the
25 # wire repositories should be given new wireproto functions but not the
26 # other largefiles modifications
26 # other largefiles modifications
27 if not repo.local():
27 if not repo.local():
28 return proto.wirereposetup(ui, repo)
28 return proto.wirereposetup(ui, repo)
29
29
30 for name in ('status', 'commitctx', 'commit', 'push'):
30 for name in ('status', 'commitctx', 'commit', 'push'):
31 method = getattr(repo, name)
31 method = getattr(repo, name)
32 if (isinstance(method, types.FunctionType) and
32 if (isinstance(method, types.FunctionType) and
33 method.func_name == 'wrap'):
33 method.func_name == 'wrap'):
34 ui.warn(_('largefiles: repo method %r appears to have already been'
34 ui.warn(_('largefiles: repo method %r appears to have already been'
35 ' wrapped by another extension: '
35 ' wrapped by another extension: '
36 'largefiles may behave incorrectly\n')
36 'largefiles may behave incorrectly\n')
37 % name)
37 % name)
38
38
39 class lfilesrepo(repo.__class__):
39 class lfilesrepo(repo.__class__):
40 lfstatus = False
40 lfstatus = False
41 def status_nolfiles(self, *args, **kwargs):
41 def status_nolfiles(self, *args, **kwargs):
42 return super(lfilesrepo, self).status(*args, **kwargs)
42 return super(lfilesrepo, self).status(*args, **kwargs)
43
43
44 # When lfstatus is set, return a context that gives the names
44 # When lfstatus is set, return a context that gives the names
45 # of largefiles instead of their corresponding standins and
45 # of largefiles instead of their corresponding standins and
46 # identifies the largefiles as always binary, regardless of
46 # identifies the largefiles as always binary, regardless of
47 # their actual contents.
47 # their actual contents.
48 def __getitem__(self, changeid):
48 def __getitem__(self, changeid):
49 ctx = super(lfilesrepo, self).__getitem__(changeid)
49 ctx = super(lfilesrepo, self).__getitem__(changeid)
50 if self.lfstatus:
50 if self.lfstatus:
51 class lfilesmanifestdict(manifest.manifestdict):
51 class lfilesmanifestdict(manifest.manifestdict):
52 def __contains__(self, filename):
52 def __contains__(self, filename):
53 if super(lfilesmanifestdict,
53 if super(lfilesmanifestdict,
54 self).__contains__(filename):
54 self).__contains__(filename):
55 return True
55 return True
56 return super(lfilesmanifestdict,
56 return super(lfilesmanifestdict,
57 self).__contains__(lfutil.standin(filename))
57 self).__contains__(lfutil.standin(filename))
58 class lfilesctx(ctx.__class__):
58 class lfilesctx(ctx.__class__):
59 def files(self):
59 def files(self):
60 filenames = super(lfilesctx, self).files()
60 filenames = super(lfilesctx, self).files()
61 return [lfutil.splitstandin(f) or f for f in filenames]
61 return [lfutil.splitstandin(f) or f for f in filenames]
62 def manifest(self):
62 def manifest(self):
63 man1 = super(lfilesctx, self).manifest()
63 man1 = super(lfilesctx, self).manifest()
64 man1.__class__ = lfilesmanifestdict
64 man1.__class__ = lfilesmanifestdict
65 return man1
65 return man1
66 def filectx(self, path, fileid=None, filelog=None):
66 def filectx(self, path, fileid=None, filelog=None):
67 try:
67 try:
68 if filelog is not None:
68 if filelog is not None:
69 result = super(lfilesctx, self).filectx(
69 result = super(lfilesctx, self).filectx(
70 path, fileid, filelog)
70 path, fileid, filelog)
71 else:
71 else:
72 result = super(lfilesctx, self).filectx(
72 result = super(lfilesctx, self).filectx(
73 path, fileid)
73 path, fileid)
74 except error.LookupError:
74 except error.LookupError:
75 # Adding a null character will cause Mercurial to
75 # Adding a null character will cause Mercurial to
76 # identify this as a binary file.
76 # identify this as a binary file.
77 if filelog is not None:
77 if filelog is not None:
78 result = super(lfilesctx, self).filectx(
78 result = super(lfilesctx, self).filectx(
79 lfutil.standin(path), fileid, filelog)
79 lfutil.standin(path), fileid, filelog)
80 else:
80 else:
81 result = super(lfilesctx, self).filectx(
81 result = super(lfilesctx, self).filectx(
82 lfutil.standin(path), fileid)
82 lfutil.standin(path), fileid)
83 olddata = result.data
83 olddata = result.data
84 result.data = lambda: olddata() + '\0'
84 result.data = lambda: olddata() + '\0'
85 return result
85 return result
86 ctx.__class__ = lfilesctx
86 ctx.__class__ = lfilesctx
87 return ctx
87 return ctx
88
88
89 # Figure out the status of big files and insert them into the
89 # Figure out the status of big files and insert them into the
90 # appropriate list in the result. Also removes standin files
90 # appropriate list in the result. Also removes standin files
91 # from the listing. Revert to the original status if
91 # from the listing. Revert to the original status if
92 # self.lfstatus is False.
92 # self.lfstatus is False.
93 # XXX large file status is buggy when used on repo proxy.
93 # XXX large file status is buggy when used on repo proxy.
94 # XXX this needs to be investigated.
94 # XXX this needs to be investigated.
95 @localrepo.unfilteredmethod
95 @localrepo.unfilteredmethod
96 def status(self, node1='.', node2=None, match=None, ignored=False,
96 def status(self, node1='.', node2=None, match=None, ignored=False,
97 clean=False, unknown=False, listsubrepos=False):
97 clean=False, unknown=False, listsubrepos=False):
98 listignored, listclean, listunknown = ignored, clean, unknown
98 listignored, listclean, listunknown = ignored, clean, unknown
99 if not self.lfstatus:
99 if not self.lfstatus:
100 return super(lfilesrepo, self).status(node1, node2, match,
100 return super(lfilesrepo, self).status(node1, node2, match,
101 listignored, listclean, listunknown, listsubrepos)
101 listignored, listclean, listunknown, listsubrepos)
102 else:
102 else:
103 # some calls in this function rely on the old version of status
103 # some calls in this function rely on the old version of status
104 self.lfstatus = False
104 self.lfstatus = False
105 if isinstance(node1, context.changectx):
105 if isinstance(node1, context.changectx):
106 ctx1 = node1
106 ctx1 = node1
107 else:
107 else:
108 ctx1 = self[node1]
108 ctx1 = self[node1]
109 if isinstance(node2, context.changectx):
109 if isinstance(node2, context.changectx):
110 ctx2 = node2
110 ctx2 = node2
111 else:
111 else:
112 ctx2 = self[node2]
112 ctx2 = self[node2]
113 working = ctx2.rev() is None
113 working = ctx2.rev() is None
114 parentworking = working and ctx1 == self['.']
114 parentworking = working and ctx1 == self['.']
115
115
116 def inctx(file, ctx):
116 def inctx(file, ctx):
117 try:
117 try:
118 if ctx.rev() is None:
118 if ctx.rev() is None:
119 return file in ctx.manifest()
119 return file in ctx.manifest()
120 ctx[file]
120 ctx[file]
121 return True
121 return True
122 except KeyError:
122 except KeyError:
123 return False
123 return False
124
124
125 if match is None:
125 if match is None:
126 match = match_.always(self.root, self.getcwd())
126 match = match_.always(self.root, self.getcwd())
127
127
128 # First check if there were files specified on the
128 # First check if there were files specified on the
129 # command line. If there were, and none of them were
129 # command line. If there were, and none of them were
130 # largefiles, we should just bail here and let super
130 # largefiles, we should just bail here and let super
131 # handle it -- thus gaining a big performance boost.
131 # handle it -- thus gaining a big performance boost.
132 lfdirstate = lfutil.openlfdirstate(ui, self)
132 lfdirstate = lfutil.openlfdirstate(ui, self)
133 if match.files() and not match.anypats():
133 if match.files() and not match.anypats():
134 for f in lfdirstate:
134 for f in lfdirstate:
135 if match(f):
135 if match(f):
136 break
136 break
137 else:
137 else:
138 return super(lfilesrepo, self).status(node1, node2,
138 return super(lfilesrepo, self).status(node1, node2,
139 match, listignored, listclean,
139 match, listignored, listclean,
140 listunknown, listsubrepos)
140 listunknown, listsubrepos)
141
141
142 # Create a copy of match that matches standins instead
142 # Create a copy of match that matches standins instead
143 # of largefiles.
143 # of largefiles.
144 def tostandins(files):
144 def tostandins(files):
145 if not working:
145 if not working:
146 return files
146 return files
147 newfiles = []
147 newfiles = []
148 dirstate = self.dirstate
148 dirstate = self.dirstate
149 for f in files:
149 for f in files:
150 sf = lfutil.standin(f)
150 sf = lfutil.standin(f)
151 if sf in dirstate:
151 if sf in dirstate:
152 newfiles.append(sf)
152 newfiles.append(sf)
153 elif sf in dirstate.dirs():
153 elif sf in dirstate.dirs():
154 # Directory entries could be regular or
154 # Directory entries could be regular or
155 # standin, check both
155 # standin, check both
156 newfiles.extend((f, sf))
156 newfiles.extend((f, sf))
157 else:
157 else:
158 newfiles.append(f)
158 newfiles.append(f)
159 return newfiles
159 return newfiles
160
160
161 m = copy.copy(match)
161 m = copy.copy(match)
162 m._files = tostandins(m._files)
162 m._files = tostandins(m._files)
163
163
164 result = super(lfilesrepo, self).status(node1, node2, m,
164 result = super(lfilesrepo, self).status(node1, node2, m,
165 ignored, clean, unknown, listsubrepos)
165 ignored, clean, unknown, listsubrepos)
166 if working:
166 if working:
167
167
168 def sfindirstate(f):
168 def sfindirstate(f):
169 sf = lfutil.standin(f)
169 sf = lfutil.standin(f)
170 dirstate = self.dirstate
170 dirstate = self.dirstate
171 return sf in dirstate or sf in dirstate.dirs()
171 return sf in dirstate or sf in dirstate.dirs()
172
172
173 match._files = [f for f in match._files
173 match._files = [f for f in match._files
174 if sfindirstate(f)]
174 if sfindirstate(f)]
175 # Don't waste time getting the ignored and unknown
175 # Don't waste time getting the ignored and unknown
176 # files from lfdirstate
176 # files from lfdirstate
177 s = lfdirstate.status(match, [], False,
177 s = lfdirstate.status(match, [], False,
178 listclean, False)
178 listclean, False)
179 (unsure, modified, added, removed, missing, _unknown,
179 (unsure, modified, added, removed, missing, _unknown,
180 _ignored, clean) = s
180 _ignored, clean) = s
181 if parentworking:
181 if parentworking:
182 for lfile in unsure:
182 for lfile in unsure:
183 standin = lfutil.standin(lfile)
183 standin = lfutil.standin(lfile)
184 if standin not in ctx1:
184 if standin not in ctx1:
185 # from second parent
185 # from second parent
186 modified.append(lfile)
186 modified.append(lfile)
187 elif ctx1[standin].data().strip() \
187 elif ctx1[standin].data().strip() \
188 != lfutil.hashfile(self.wjoin(lfile)):
188 != lfutil.hashfile(self.wjoin(lfile)):
189 modified.append(lfile)
189 modified.append(lfile)
190 else:
190 else:
191 clean.append(lfile)
191 clean.append(lfile)
192 lfdirstate.normal(lfile)
192 lfdirstate.normal(lfile)
193 else:
193 else:
194 tocheck = unsure + modified + added + clean
194 tocheck = unsure + modified + added + clean
195 modified, added, clean = [], [], []
195 modified, added, clean = [], [], []
196
196
197 for lfile in tocheck:
197 for lfile in tocheck:
198 standin = lfutil.standin(lfile)
198 standin = lfutil.standin(lfile)
199 if inctx(standin, ctx1):
199 if inctx(standin, ctx1):
200 if ctx1[standin].data().strip() != \
200 if ctx1[standin].data().strip() != \
201 lfutil.hashfile(self.wjoin(lfile)):
201 lfutil.hashfile(self.wjoin(lfile)):
202 modified.append(lfile)
202 modified.append(lfile)
203 else:
203 else:
204 clean.append(lfile)
204 clean.append(lfile)
205 else:
205 else:
206 added.append(lfile)
206 added.append(lfile)
207
207
208 # Standins no longer found in lfdirstate has been removed
208 # Standins no longer found in lfdirstate has been removed
209 for standin in ctx1.manifest():
209 for standin in ctx1.manifest():
210 if not lfutil.isstandin(standin):
210 if not lfutil.isstandin(standin):
211 continue
211 continue
212 lfile = lfutil.splitstandin(standin)
212 lfile = lfutil.splitstandin(standin)
213 if not match(lfile):
213 if not match(lfile):
214 continue
214 continue
215 if lfile not in lfdirstate:
215 if lfile not in lfdirstate:
216 removed.append(lfile)
216 removed.append(lfile)
217
217
218 # Filter result lists
218 # Filter result lists
219 result = list(result)
219 result = list(result)
220
220
221 # Largefiles are not really removed when they're
221 # Largefiles are not really removed when they're
222 # still in the normal dirstate. Likewise, normal
222 # still in the normal dirstate. Likewise, normal
223 # files are not really removed if they are still in
223 # files are not really removed if they are still in
224 # lfdirstate. This happens in merges where files
224 # lfdirstate. This happens in merges where files
225 # change type.
225 # change type.
226 removed = [f for f in removed if f not in self.dirstate]
226 removed = [f for f in removed if f not in self.dirstate]
227 result[2] = [f for f in result[2] if f not in lfdirstate]
227 result[2] = [f for f in result[2] if f not in lfdirstate]
228
228
229 lfiles = set(lfdirstate._map)
229 lfiles = set(lfdirstate._map)
230 # Unknown files
230 # Unknown files
231 result[4] = set(result[4]).difference(lfiles)
231 result[4] = set(result[4]).difference(lfiles)
232 # Ignored files
232 # Ignored files
233 result[5] = set(result[5]).difference(lfiles)
233 result[5] = set(result[5]).difference(lfiles)
234 # combine normal files and largefiles
234 # combine normal files and largefiles
235 normals = [[fn for fn in filelist
235 normals = [[fn for fn in filelist
236 if not lfutil.isstandin(fn)]
236 if not lfutil.isstandin(fn)]
237 for filelist in result]
237 for filelist in result]
238 lfiles = (modified, added, removed, missing, [], [], clean)
238 lfiles = (modified, added, removed, missing, [], [], clean)
239 result = [sorted(list1 + list2)
239 result = [sorted(list1 + list2)
240 for (list1, list2) in zip(normals, lfiles)]
240 for (list1, list2) in zip(normals, lfiles)]
241 else:
241 else:
242 def toname(f):
242 def toname(f):
243 if lfutil.isstandin(f):
243 if lfutil.isstandin(f):
244 return lfutil.splitstandin(f)
244 return lfutil.splitstandin(f)
245 return f
245 return f
246 result = [[toname(f) for f in items] for items in result]
246 result = [[toname(f) for f in items] for items in result]
247
247
248 lfdirstate.write()
248 lfdirstate.write()
249
249
250 if not listunknown:
250 if not listunknown:
251 result[4] = []
251 result[4] = []
252 if not listignored:
252 if not listignored:
253 result[5] = []
253 result[5] = []
254 if not listclean:
254 if not listclean:
255 result[6] = []
255 result[6] = []
256 self.lfstatus = True
256 self.lfstatus = True
257 return result
257 return result
258
258
259 # As part of committing, copy all of the largefiles into the
259 # As part of committing, copy all of the largefiles into the
260 # cache.
260 # cache.
261 def commitctx(self, *args, **kwargs):
261 def commitctx(self, *args, **kwargs):
262 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
262 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
263 lfutil.copyalltostore(self, node)
263 lfutil.copyalltostore(self, node)
264 return node
264 return node
265
265
266 # Before commit, largefile standins have not had their
266 # Before commit, largefile standins have not had their
267 # contents updated to reflect the hash of their largefile.
267 # contents updated to reflect the hash of their largefile.
268 # Do that here.
268 # Do that here.
269 def commit(self, text="", user=None, date=None, match=None,
269 def commit(self, text="", user=None, date=None, match=None,
270 force=False, editor=False, extra={}):
270 force=False, editor=False, extra={}):
271 orig = super(lfilesrepo, self).commit
271 orig = super(lfilesrepo, self).commit
272
272
273 wlock = self.wlock()
273 wlock = self.wlock()
274 try:
274 try:
275 # Case 0: Rebase or Transplant
275 # Case 0: Rebase or Transplant
276 # We have to take the time to pull down the new largefiles now.
276 # We have to take the time to pull down the new largefiles now.
277 # Otherwise, any largefiles that were modified in the
277 # Otherwise, any largefiles that were modified in the
278 # destination changesets get overwritten, either by the rebase
278 # destination changesets get overwritten, either by the rebase
279 # or in the first commit after the rebase or transplant.
279 # or in the first commit after the rebase or transplant.
280 # updatelfiles will update the dirstate to mark any pulled
280 # updatelfiles will update the dirstate to mark any pulled
281 # largefiles as modified
281 # largefiles as modified
282 if getattr(self, "_isrebasing", False) or \
282 if getattr(self, "_isrebasing", False) or \
283 getattr(self, "_istransplanting", False):
283 getattr(self, "_istransplanting", False):
284 lfcommands.updatelfiles(self.ui, self, filelist=None,
284 lfcommands.updatelfiles(self.ui, self, filelist=None,
285 printmessage=False)
285 printmessage=False)
286 result = orig(text=text, user=user, date=date, match=match,
286 result = orig(text=text, user=user, date=date, match=match,
287 force=force, editor=editor, extra=extra)
287 force=force, editor=editor, extra=extra)
288 return result
288 return result
289 # Case 1: user calls commit with no specific files or
289 # Case 1: user calls commit with no specific files or
290 # include/exclude patterns: refresh and commit all files that
290 # include/exclude patterns: refresh and commit all files that
291 # are "dirty".
291 # are "dirty".
292 if ((match is None) or
292 if ((match is None) or
293 (not match.anypats() and not match.files())):
293 (not match.anypats() and not match.files())):
294 # Spend a bit of time here to get a list of files we know
294 # Spend a bit of time here to get a list of files we know
295 # are modified so we can compare only against those.
295 # are modified so we can compare only against those.
296 # It can cost a lot of time (several seconds)
296 # It can cost a lot of time (several seconds)
297 # otherwise to update all standins if the largefiles are
297 # otherwise to update all standins if the largefiles are
298 # large.
298 # large.
299 lfdirstate = lfutil.openlfdirstate(ui, self)
299 lfdirstate = lfutil.openlfdirstate(ui, self)
300 dirtymatch = match_.always(self.root, self.getcwd())
300 dirtymatch = match_.always(self.root, self.getcwd())
301 s = lfdirstate.status(dirtymatch, [], False, False, False)
301 s = lfdirstate.status(dirtymatch, [], False, False, False)
302 modifiedfiles = []
302 modifiedfiles = []
303 for i in s:
303 for i in s:
304 modifiedfiles.extend(i)
304 modifiedfiles.extend(i)
305 lfiles = lfutil.listlfiles(self)
305 lfiles = lfutil.listlfiles(self)
306 # this only loops through largefiles that exist (not
306 # this only loops through largefiles that exist (not
307 # removed/renamed)
307 # removed/renamed)
308 for lfile in lfiles:
308 for lfile in lfiles:
309 if lfile in modifiedfiles:
309 if lfile in modifiedfiles:
310 if os.path.exists(
310 if os.path.exists(
311 self.wjoin(lfutil.standin(lfile))):
311 self.wjoin(lfutil.standin(lfile))):
312 # this handles the case where a rebase is being
312 # this handles the case where a rebase is being
313 # performed and the working copy is not updated
313 # performed and the working copy is not updated
314 # yet.
314 # yet.
315 if os.path.exists(self.wjoin(lfile)):
315 if os.path.exists(self.wjoin(lfile)):
316 lfutil.updatestandin(self,
316 lfutil.updatestandin(self,
317 lfutil.standin(lfile))
317 lfutil.standin(lfile))
318 lfdirstate.normal(lfile)
318 lfdirstate.normal(lfile)
319
319
320 result = orig(text=text, user=user, date=date, match=match,
320 result = orig(text=text, user=user, date=date, match=match,
321 force=force, editor=editor, extra=extra)
321 force=force, editor=editor, extra=extra)
322
322
323 if result is not None:
323 if result is not None:
324 for lfile in lfdirstate:
324 for lfile in lfdirstate:
325 if lfile in modifiedfiles:
325 if lfile in modifiedfiles:
326 if (not os.path.exists(self.wjoin(
326 if (not os.path.exists(self.wjoin(
327 lfutil.standin(lfile)))) or \
327 lfutil.standin(lfile)))) or \
328 (not os.path.exists(self.wjoin(lfile))):
328 (not os.path.exists(self.wjoin(lfile))):
329 lfdirstate.drop(lfile)
329 lfdirstate.drop(lfile)
330
330
331 # This needs to be after commit; otherwise precommit hooks
331 # This needs to be after commit; otherwise precommit hooks
332 # get the wrong status
332 # get the wrong status
333 lfdirstate.write()
333 lfdirstate.write()
334 return result
334 return result
335
335
336 lfiles = lfutil.listlfiles(self)
336 lfiles = lfutil.listlfiles(self)
337 match._files = self._subdirlfs(match.files(), lfiles)
337 match._files = self._subdirlfs(match.files(), lfiles)
338
338
339 # Case 2: user calls commit with specified patterns: refresh
339 # Case 2: user calls commit with specified patterns: refresh
340 # any matching big files.
340 # any matching big files.
341 smatcher = lfutil.composestandinmatcher(self, match)
341 smatcher = lfutil.composestandinmatcher(self, match)
342 standins = lfutil.dirstatewalk(self.dirstate, smatcher)
342 standins = self.dirstate.walk(smatcher, [], False, False)
343
343
344 # No matching big files: get out of the way and pass control to
344 # No matching big files: get out of the way and pass control to
345 # the usual commit() method.
345 # the usual commit() method.
346 if not standins:
346 if not standins:
347 return orig(text=text, user=user, date=date, match=match,
347 return orig(text=text, user=user, date=date, match=match,
348 force=force, editor=editor, extra=extra)
348 force=force, editor=editor, extra=extra)
349
349
350 # Refresh all matching big files. It's possible that the
350 # Refresh all matching big files. It's possible that the
351 # commit will end up failing, in which case the big files will
351 # commit will end up failing, in which case the big files will
352 # stay refreshed. No harm done: the user modified them and
352 # stay refreshed. No harm done: the user modified them and
353 # asked to commit them, so sooner or later we're going to
353 # asked to commit them, so sooner or later we're going to
354 # refresh the standins. Might as well leave them refreshed.
354 # refresh the standins. Might as well leave them refreshed.
355 lfdirstate = lfutil.openlfdirstate(ui, self)
355 lfdirstate = lfutil.openlfdirstate(ui, self)
356 for standin in standins:
356 for standin in standins:
357 lfile = lfutil.splitstandin(standin)
357 lfile = lfutil.splitstandin(standin)
358 if lfdirstate[lfile] <> 'r':
358 if lfdirstate[lfile] <> 'r':
359 lfutil.updatestandin(self, standin)
359 lfutil.updatestandin(self, standin)
360 lfdirstate.normal(lfile)
360 lfdirstate.normal(lfile)
361 else:
361 else:
362 lfdirstate.drop(lfile)
362 lfdirstate.drop(lfile)
363
363
364 # Cook up a new matcher that only matches regular files or
364 # Cook up a new matcher that only matches regular files or
365 # standins corresponding to the big files requested by the
365 # standins corresponding to the big files requested by the
366 # user. Have to modify _files to prevent commit() from
366 # user. Have to modify _files to prevent commit() from
367 # complaining "not tracked" for big files.
367 # complaining "not tracked" for big files.
368 match = copy.copy(match)
368 match = copy.copy(match)
369 origmatchfn = match.matchfn
369 origmatchfn = match.matchfn
370
370
371 # Check both the list of largefiles and the list of
371 # Check both the list of largefiles and the list of
372 # standins because if a largefile was removed, it
372 # standins because if a largefile was removed, it
373 # won't be in the list of largefiles at this point
373 # won't be in the list of largefiles at this point
374 match._files += sorted(standins)
374 match._files += sorted(standins)
375
375
376 actualfiles = []
376 actualfiles = []
377 for f in match._files:
377 for f in match._files:
378 fstandin = lfutil.standin(f)
378 fstandin = lfutil.standin(f)
379
379
380 # ignore known largefiles and standins
380 # ignore known largefiles and standins
381 if f in lfiles or fstandin in standins:
381 if f in lfiles or fstandin in standins:
382 continue
382 continue
383
383
384 # append directory separator to avoid collisions
384 # append directory separator to avoid collisions
385 if not fstandin.endswith(os.sep):
385 if not fstandin.endswith(os.sep):
386 fstandin += os.sep
386 fstandin += os.sep
387
387
388 actualfiles.append(f)
388 actualfiles.append(f)
389 match._files = actualfiles
389 match._files = actualfiles
390
390
391 def matchfn(f):
391 def matchfn(f):
392 if origmatchfn(f):
392 if origmatchfn(f):
393 return f not in lfiles
393 return f not in lfiles
394 else:
394 else:
395 return f in standins
395 return f in standins
396
396
397 match.matchfn = matchfn
397 match.matchfn = matchfn
398 result = orig(text=text, user=user, date=date, match=match,
398 result = orig(text=text, user=user, date=date, match=match,
399 force=force, editor=editor, extra=extra)
399 force=force, editor=editor, extra=extra)
400 # This needs to be after commit; otherwise precommit hooks
400 # This needs to be after commit; otherwise precommit hooks
401 # get the wrong status
401 # get the wrong status
402 lfdirstate.write()
402 lfdirstate.write()
403 return result
403 return result
404 finally:
404 finally:
405 wlock.release()
405 wlock.release()
406
406
407 def push(self, remote, force=False, revs=None, newbranch=False):
407 def push(self, remote, force=False, revs=None, newbranch=False):
408 outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
408 outgoing = discovery.findcommonoutgoing(repo, remote.peer(),
409 force=force)
409 force=force)
410 if outgoing.missing:
410 if outgoing.missing:
411 toupload = set()
411 toupload = set()
412 o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
412 o = self.changelog.nodesbetween(outgoing.missing, revs)[0]
413 for n in o:
413 for n in o:
414 parents = [p for p in self.changelog.parents(n)
414 parents = [p for p in self.changelog.parents(n)
415 if p != node_.nullid]
415 if p != node_.nullid]
416 ctx = self[n]
416 ctx = self[n]
417 files = set(ctx.files())
417 files = set(ctx.files())
418 if len(parents) == 2:
418 if len(parents) == 2:
419 mc = ctx.manifest()
419 mc = ctx.manifest()
420 mp1 = ctx.parents()[0].manifest()
420 mp1 = ctx.parents()[0].manifest()
421 mp2 = ctx.parents()[1].manifest()
421 mp2 = ctx.parents()[1].manifest()
422 for f in mp1:
422 for f in mp1:
423 if f not in mc:
423 if f not in mc:
424 files.add(f)
424 files.add(f)
425 for f in mp2:
425 for f in mp2:
426 if f not in mc:
426 if f not in mc:
427 files.add(f)
427 files.add(f)
428 for f in mc:
428 for f in mc:
429 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
429 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
430 None):
430 None):
431 files.add(f)
431 files.add(f)
432
432
433 toupload = toupload.union(
433 toupload = toupload.union(
434 set([ctx[f].data().strip()
434 set([ctx[f].data().strip()
435 for f in files
435 for f in files
436 if lfutil.isstandin(f) and f in ctx]))
436 if lfutil.isstandin(f) and f in ctx]))
437 lfcommands.uploadlfiles(ui, self, remote, toupload)
437 lfcommands.uploadlfiles(ui, self, remote, toupload)
438 return super(lfilesrepo, self).push(remote, force, revs,
438 return super(lfilesrepo, self).push(remote, force, revs,
439 newbranch)
439 newbranch)
440
440
441 def _subdirlfs(self, files, lfiles):
441 def _subdirlfs(self, files, lfiles):
442 '''
442 '''
443 Adjust matched file list
443 Adjust matched file list
444 If we pass a directory to commit whose only commitable files
444 If we pass a directory to commit whose only commitable files
445 are largefiles, the core commit code aborts before finding
445 are largefiles, the core commit code aborts before finding
446 the largefiles.
446 the largefiles.
447 So we do the following:
447 So we do the following:
448 For directories that only have largefiles as matches,
448 For directories that only have largefiles as matches,
449 we explicitly add the largefiles to the matchlist and remove
449 we explicitly add the largefiles to the matchlist and remove
450 the directory.
450 the directory.
451 In other cases, we leave the match list unmodified.
451 In other cases, we leave the match list unmodified.
452 '''
452 '''
453 actualfiles = []
453 actualfiles = []
454 dirs = []
454 dirs = []
455 regulars = []
455 regulars = []
456
456
457 for f in files:
457 for f in files:
458 if lfutil.isstandin(f + '/'):
458 if lfutil.isstandin(f + '/'):
459 raise util.Abort(
459 raise util.Abort(
460 _('file "%s" is a largefile standin') % f,
460 _('file "%s" is a largefile standin') % f,
461 hint=('commit the largefile itself instead'))
461 hint=('commit the largefile itself instead'))
462 # Scan directories
462 # Scan directories
463 if os.path.isdir(self.wjoin(f)):
463 if os.path.isdir(self.wjoin(f)):
464 dirs.append(f)
464 dirs.append(f)
465 else:
465 else:
466 regulars.append(f)
466 regulars.append(f)
467
467
468 for f in dirs:
468 for f in dirs:
469 matcheddir = False
469 matcheddir = False
470 d = self.dirstate.normalize(f) + '/'
470 d = self.dirstate.normalize(f) + '/'
471 # Check for matched normal files
471 # Check for matched normal files
472 for mf in regulars:
472 for mf in regulars:
473 if self.dirstate.normalize(mf).startswith(d):
473 if self.dirstate.normalize(mf).startswith(d):
474 actualfiles.append(f)
474 actualfiles.append(f)
475 matcheddir = True
475 matcheddir = True
476 break
476 break
477 if not matcheddir:
477 if not matcheddir:
478 # If no normal match, manually append
478 # If no normal match, manually append
479 # any matching largefiles
479 # any matching largefiles
480 for lf in lfiles:
480 for lf in lfiles:
481 if self.dirstate.normalize(lf).startswith(d):
481 if self.dirstate.normalize(lf).startswith(d):
482 actualfiles.append(lf)
482 actualfiles.append(lf)
483 if not matcheddir:
483 if not matcheddir:
484 actualfiles.append(lfutil.standin(f))
484 actualfiles.append(lfutil.standin(f))
485 matcheddir = True
485 matcheddir = True
486 # Nothing in dir, so readd it
486 # Nothing in dir, so readd it
487 # and let commit reject it
487 # and let commit reject it
488 if not matcheddir:
488 if not matcheddir:
489 actualfiles.append(f)
489 actualfiles.append(f)
490
490
491 # Always add normal files
491 # Always add normal files
492 actualfiles += regulars
492 actualfiles += regulars
493 return actualfiles
493 return actualfiles
494
494
495 repo.__class__ = lfilesrepo
495 repo.__class__ = lfilesrepo
496
496
497 def checkrequireslfiles(ui, repo, **kwargs):
497 def checkrequireslfiles(ui, repo, **kwargs):
498 if 'largefiles' not in repo.requirements and util.any(
498 if 'largefiles' not in repo.requirements and util.any(
499 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
499 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
500 repo.requirements.add('largefiles')
500 repo.requirements.add('largefiles')
501 repo._writerequirements()
501 repo._writerequirements()
502
502
503 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
503 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
504 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
504 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
General Comments 0
You need to be logged in to leave comments. Login now