largefiles: remove additional blank lines...
liscju
r29420:e5c91dc9 default
@@ -1,664 +1,662
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import copy
import hashlib
import os
import platform
import stat

from mercurial.i18n import _

from mercurial import (
    dirstate,
    error,
    httpconnection,
    match as matchmod,
    node,
    scmutil,
    util,
)

shortname = '.hglf'
shortnameslash = shortname + '/'
longname = 'largefiles'

# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)

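# Editor's note: the sketch below is illustrative and not part of the original
# file. It only shows how the per-user cache path is composed from
# _usercachedir() (defined next) plus the hex hash, assuming a made-up hash
# and default configuration.
def _example_usercachepath(ui):
    somehash = '0123456789abcdef0123456789abcdef01234567'
    # e.g. ~/.cache/largefiles/<hash> on POSIX when XDG_CACHE_HOME is unset
    return usercachepath(ui, somehash)
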
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    raise error.Abort(_('unknown %s usercache location\n') % longname)

def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)

def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

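# Editor's note: illustrative sketch, not part of the original file. It shows
# the lookup order findfile() implements: repository store first, then the
# per-user cache (linking the hit into the store), otherwise None.
def _example_findfile(repo, somehash):
    path = findfile(repo, somehash)
    if path is None:
        repo.ui.note('largefile %s not available locally\n' % somehash)
    return path
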
class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)

def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

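# Editor's note: illustrative sketch, not part of the original file. Every
# largefilesdirstate method funnels its path argument through unixpath(), so
# callers may pass OS-native paths and still hit the same dirstate entry.
def _example_lfdirstate_add(ui, repo):
    lfdirstate = openlfdirstate(ui, repo)
    lfdirstate.add(os.path.join('data', 'big.bin'))  # recorded as 'data/big.bin'
    lfdirstate.write()
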
def lfdirstatestatus(lfdirstate, repo):
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))

def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)

def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

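# Editor's note: illustrative sketch, not part of the original file. For a
# shared repository, findstorepath() prefers the share source's store but
# still falls back to the local store for backward compatibility.
def _example_findstorepath(repo, somehash):
    path, exists = findstorepath(repo, somehash)
    # 'path' points at the primary store even when 'exists' is False
    return path if exists else None
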
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, rev, file, uploaded=False):
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))

def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)

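# Editor's note: illustrative sketch, not part of the original file. A caller
# checking out one largefile reads the expected hash from the standin and lets
# copyfromcache() verify the data it copies; a corrupted cache entry is
# rejected and removed from the working copy.
def _example_checkout_one(repo, lfile):
    expected = readstandin(repo, lfile)
    return copyfromcache(repo, expected, lfile)
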
def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)

def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

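# Editor's note: illustrative sketch, not part of the original file. It shows
# the mapping between a largefile name and its standin implemented by
# standin()/isstandin()/splitstandin() above, using a made-up filename.
def _example_standin_roundtrip():
    assert standin('data/big.bin') == '.hglf/data/big.bin'
    assert isstandin('.hglf/data/big.bin')
    assert splitstandin('.hglf/data/big.bin') == 'data/big.bin'
    assert splitstandin('data/big.bin') is None
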
def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if repo.wvfs.exists(splitstandin(standin)):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % splitstandin(standin))

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = hashlib.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()

def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = hashlib.sha1('')
    fd = open(file, 'rb')
    for data in util.filechunkiter(fd, 128 * 1024):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

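# Editor's note: illustrative sketch, not part of the original file. The
# standin content written by writestandin() is simply the 40-character hex
# SHA-1 of the largefile plus a newline; hashfile() and hexsha1() agree on
# that digest for the same data.
def _example_hash_agreement(path):
    with open(path, 'rb') as fd:
        return hexsha1(fd) == hashfile(path)  # True for any existing file
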
def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)

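# Editor's note (not part of the original file): the mapping applied by
# synclfdirstate() above, from the standin's dirstate state to the
# lfdirstate action, is:
#   'n' -> normal() or normallookup()   'm' -> normallookup()
#   'r' -> remove()    'a' -> add()     '?' -> drop()
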
def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

def getlfilestoupload(repo, missing, addfunc):
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)

def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
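
# Editor's note: illustrative sketch, not part of the original file. Callers
# inside the extension fetch a status writer and use it like ui.status(); the
# message shown here is only an example.
def _example_statuswriter(ui, repo):
    statuswriter = getstatuswriter(ui, repo)
    statuswriter('getting changed largefiles\n')
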
@@ -1,1433 +1,1432
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''
from __future__ import absolute_import

import copy
import os

from mercurial.i18n import _

from mercurial import (
    archival,
    cmdutil,
    error,
    hg,
    match as matchmod,
    pathutil,
    registrar,
    revset,
    scmutil,
    util,
)

from . import (
    lfcommands,
    lfutil,
    storefactory,
)

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m

def composenormalfilematcher(match, manifest, exclude=None):
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                              manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m

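# Editor's note: illustrative sketch, not part of the original file. A command
# override typically splits the user's matcher into a largefile half and a
# normal-file half and dispatches each set of files separately.
def _example_split_matcher(repo, match):
    manifest = repo[None].manifest()
    lfmatcher = composelargefilematcher(match, manifest)
    normalmatcher = composenormalfilematcher(match, manifest)
    return lfmatcher, normalmatcher
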
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)

def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch

def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch')

def installmatchandpatsfn(f):
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats

def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)

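# Editor's note: illustrative sketch, not part of the original file. The
# install*/restore* helpers above are meant to be used as strictly paired
# push/pop operations around the wrapped call, since they patch scmutil at
# module level.
def _example_with_patched_match(manifest, work):
    installnormalfilesmatchfn(manifest)
    try:
        return work()
    finally:
        restorematchfn()
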
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    return added, bad

179 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
179 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
180 after = opts.get('after')
180 after = opts.get('after')
181 m = composelargefilematcher(matcher, repo[None].manifest())
181 m = composelargefilematcher(matcher, repo[None].manifest())
182 try:
182 try:
183 repo.lfstatus = True
183 repo.lfstatus = True
184 s = repo.status(match=m, clean=not isaddremove)
184 s = repo.status(match=m, clean=not isaddremove)
185 finally:
185 finally:
186 repo.lfstatus = False
186 repo.lfstatus = False
187 manifest = repo[None].manifest()
187 manifest = repo[None].manifest()
188 modified, added, deleted, clean = [[f for f in list
188 modified, added, deleted, clean = [[f for f in list
189 if lfutil.standin(f) in manifest]
189 if lfutil.standin(f) in manifest]
190 for list in (s.modified, s.added,
190 for list in (s.modified, s.added,
191 s.deleted, s.clean)]
191 s.deleted, s.clean)]
192
192
193 def warn(files, msg):
193 def warn(files, msg):
194 for f in files:
194 for f in files:
195 ui.warn(msg % m.rel(f))
195 ui.warn(msg % m.rel(f))
196 return int(len(files) > 0)
196 return int(len(files) > 0)
197
197
198 result = 0
198 result = 0
199
199
200 if after:
200 if after:
201 remove = deleted
201 remove = deleted
202 result = warn(modified + added + clean,
202 result = warn(modified + added + clean,
203 _('not removing %s: file still exists\n'))
203 _('not removing %s: file still exists\n'))
204 else:
204 else:
205 remove = deleted + clean
205 remove = deleted + clean
206 result = warn(modified, _('not removing %s: file is modified (use -f'
206 result = warn(modified, _('not removing %s: file is modified (use -f'
207 ' to force removal)\n'))
207 ' to force removal)\n'))
208 result = warn(added, _('not removing %s: file has been marked for add'
208 result = warn(added, _('not removing %s: file has been marked for add'
209 ' (use forget to undo)\n')) or result
209 ' (use forget to undo)\n')) or result
210
210
211 # Need to lock because standin files are deleted then removed from the
211 # Need to lock because standin files are deleted then removed from the
212 # repository and we could race in-between.
212 # repository and we could race in-between.
213 with repo.wlock():
213 with repo.wlock():
214 lfdirstate = lfutil.openlfdirstate(ui, repo)
214 lfdirstate = lfutil.openlfdirstate(ui, repo)
215 for f in sorted(remove):
215 for f in sorted(remove):
216 if ui.verbose or not m.exact(f):
216 if ui.verbose or not m.exact(f):
217 # addremove in core gets fancy with the name, remove doesn't
217 # addremove in core gets fancy with the name, remove doesn't
218 if isaddremove:
218 if isaddremove:
219 name = m.uipath(f)
219 name = m.uipath(f)
220 else:
220 else:
221 name = m.rel(f)
221 name = m.rel(f)
222 ui.status(_('removing %s\n') % name)
222 ui.status(_('removing %s\n') % name)
223
223
224 if not opts.get('dry_run'):
224 if not opts.get('dry_run'):
225 if not after:
225 if not after:
226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
226 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
227
227
228 if opts.get('dry_run'):
228 if opts.get('dry_run'):
229 return result
229 return result
230
230
231 remove = [lfutil.standin(f) for f in remove]
231 remove = [lfutil.standin(f) for f in remove]
232 # If this is being called by addremove, let the original addremove
232 # If this is being called by addremove, let the original addremove
233 # function handle this.
233 # function handle this.
234 if not isaddremove:
234 if not isaddremove:
235 for f in remove:
235 for f in remove:
236 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
236 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
237 repo[None].forget(remove)
237 repo[None].forget(remove)
238
238
239 for f in remove:
239 for f in remove:
240 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
240 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
241 False)
241 False)
242
242
243 lfdirstate.write()
243 lfdirstate.write()
244
244
245 return result
245 return result
246
246
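# Illustrative sketch (editorial aside, not part of this changeset): how the
# removal set in removelargefiles() above is chosen. With --after only
# already-deleted files are scheduled for removal; otherwise clean files are
# removed too, and the remaining categories produce the warnings emitted by
# warn(). The helper below is a hypothetical simplification of that logic.
def _pickremove_sketch(modified, added, deleted, clean, after):
    # returns (files to remove, files to warn about)
    if after:
        return deleted, modified + added + clean
    return deleted + clean, modified + added

# _pickremove_sketch([], [], ['a.bin'], ['b.bin'], after=True)
#   -> (['a.bin'], ['b.bin'])
# _pickremove_sketch([], [], ['a.bin'], ['b.bin'], after=False)
#   -> (['a.bin', 'b.bin'], [])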
247 # For overriding mercurial.hgweb.webcommands so that largefiles will
247 # For overriding mercurial.hgweb.webcommands so that largefiles will
248 # appear at their right place in the manifests.
248 # appear at their right place in the manifests.
249 def decodepath(orig, path):
249 def decodepath(orig, path):
250 return lfutil.splitstandin(path) or path
250 return lfutil.splitstandin(path) or path
251
251
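# Illustrative sketch (editorial aside, not part of this changeset): what the
# decodepath() override above does, shown with a minimal stand-in for
# lfutil.splitstandin(). Paths under the '.hglf/' standin directory are shown
# at their real location in hgweb; everything else passes through unchanged.
def _splitstandin_sketch(path, shortnameslash='.hglf/'):
    # hypothetical helper; the real logic lives in lfutil.splitstandin()
    if path.startswith(shortnameslash):
        return path[len(shortnameslash):]
    return None

# _splitstandin_sketch('.hglf/big/archive.tar') or '.hglf/big/archive.tar'
#   -> 'big/archive.tar'
# _splitstandin_sketch('src/module.py') or 'src/module.py'
#   -> 'src/module.py'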
252 # -- Wrappers: modify existing commands --------------------------------
252 # -- Wrappers: modify existing commands --------------------------------
253
253
254 def overrideadd(orig, ui, repo, *pats, **opts):
254 def overrideadd(orig, ui, repo, *pats, **opts):
255 if opts.get('normal') and opts.get('large'):
255 if opts.get('normal') and opts.get('large'):
256 raise error.Abort(_('--normal cannot be used with --large'))
256 raise error.Abort(_('--normal cannot be used with --large'))
257 return orig(ui, repo, *pats, **opts)
257 return orig(ui, repo, *pats, **opts)
258
258
259 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
259 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
260 # The --normal flag short circuits this override
260 # The --normal flag short circuits this override
261 if opts.get('normal'):
261 if opts.get('normal'):
262 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
262 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
263
263
264 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
264 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
265 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
266 ladded)
266 ladded)
267 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
267 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
268
268
269 bad.extend(f for f in lbad)
269 bad.extend(f for f in lbad)
270 return bad
270 return bad
271
271
272 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
272 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
273 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
273 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
274 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
274 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
275 return removelargefiles(ui, repo, False, matcher, after=after,
275 return removelargefiles(ui, repo, False, matcher, after=after,
276 force=force) or result
276 force=force) or result
277
277
278 def overridestatusfn(orig, repo, rev2, **opts):
278 def overridestatusfn(orig, repo, rev2, **opts):
279 try:
279 try:
280 repo._repo.lfstatus = True
280 repo._repo.lfstatus = True
281 return orig(repo, rev2, **opts)
281 return orig(repo, rev2, **opts)
282 finally:
282 finally:
283 repo._repo.lfstatus = False
283 repo._repo.lfstatus = False
284
284
285 def overridestatus(orig, ui, repo, *pats, **opts):
285 def overridestatus(orig, ui, repo, *pats, **opts):
286 try:
286 try:
287 repo.lfstatus = True
287 repo.lfstatus = True
288 return orig(ui, repo, *pats, **opts)
288 return orig(ui, repo, *pats, **opts)
289 finally:
289 finally:
290 repo.lfstatus = False
290 repo.lfstatus = False
291
291
292 def overridedirty(orig, repo, ignoreupdate=False):
292 def overridedirty(orig, repo, ignoreupdate=False):
293 try:
293 try:
294 repo._repo.lfstatus = True
294 repo._repo.lfstatus = True
295 return orig(repo, ignoreupdate)
295 return orig(repo, ignoreupdate)
296 finally:
296 finally:
297 repo._repo.lfstatus = False
297 repo._repo.lfstatus = False
298
298
299 def overridelog(orig, ui, repo, *pats, **opts):
299 def overridelog(orig, ui, repo, *pats, **opts):
300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
301 default='relpath', badfn=None):
301 default='relpath', badfn=None):
302 """Matcher that merges root directory with .hglf, suitable for log.
302 """Matcher that merges root directory with .hglf, suitable for log.
303 It is still possible to match .hglf directly.
303 It is still possible to match .hglf directly.
304 For any listed file, run log on its standin too.
304 For any listed file, run log on its standin too.
305 matchfn tries both the given filename and with .hglf stripped.
305 matchfn tries both the given filename and with .hglf stripped.
306 """
306 """
307 if opts is None:
307 if opts is None:
308 opts = {}
308 opts = {}
309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
310 badfn=badfn)
310 badfn=badfn)
311 m, p = copy.copy(matchandpats)
311 m, p = copy.copy(matchandpats)
312
312
313 if m.always():
313 if m.always():
314 # We want to match everything anyway, so there's no benefit trying
314 # We want to match everything anyway, so there's no benefit trying
315 # to add standins.
315 # to add standins.
316 return matchandpats
316 return matchandpats
317
317
318 pats = set(p)
318 pats = set(p)
319
319
320 def fixpats(pat, tostandin=lfutil.standin):
320 def fixpats(pat, tostandin=lfutil.standin):
321 if pat.startswith('set:'):
321 if pat.startswith('set:'):
322 return pat
322 return pat
323
323
324 kindpat = matchmod._patsplit(pat, None)
324 kindpat = matchmod._patsplit(pat, None)
325
325
326 if kindpat[0] is not None:
326 if kindpat[0] is not None:
327 return kindpat[0] + ':' + tostandin(kindpat[1])
327 return kindpat[0] + ':' + tostandin(kindpat[1])
328 return tostandin(kindpat[1])
328 return tostandin(kindpat[1])
329
329
330 if m._cwd:
330 if m._cwd:
331 hglf = lfutil.shortname
331 hglf = lfutil.shortname
332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
333
333
334 def tostandin(f):
334 def tostandin(f):
335 # The file may already be a standin, so truncate the back
335 # The file may already be a standin, so truncate the back
336 # prefix and test before mangling it. This avoids turning
336 # prefix and test before mangling it. This avoids turning
337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
339 return f
339 return f
340
340
341 # An absolute path is from outside the repo, so truncate the
341 # An absolute path is from outside the repo, so truncate the
342 # path to the root before building the standin. Otherwise cwd
342 # path to the root before building the standin. Otherwise cwd
343 # is somewhere in the repo, relative to root, and needs to be
343 # is somewhere in the repo, relative to root, and needs to be
344 # prepended before building the standin.
344 # prepended before building the standin.
345 if os.path.isabs(m._cwd):
345 if os.path.isabs(m._cwd):
346 f = f[len(back):]
346 f = f[len(back):]
347 else:
347 else:
348 f = m._cwd + '/' + f
348 f = m._cwd + '/' + f
349 return back + lfutil.standin(f)
349 return back + lfutil.standin(f)
350
350
351 pats.update(fixpats(f, tostandin) for f in p)
351 pats.update(fixpats(f, tostandin) for f in p)
352 else:
352 else:
353 def tostandin(f):
353 def tostandin(f):
354 if lfutil.splitstandin(f):
354 if lfutil.splitstandin(f):
355 return f
355 return f
356 return lfutil.standin(f)
356 return lfutil.standin(f)
357 pats.update(fixpats(f, tostandin) for f in p)
357 pats.update(fixpats(f, tostandin) for f in p)
358
358
359 for i in range(0, len(m._files)):
359 for i in range(0, len(m._files)):
360 # Don't add '.hglf' to m.files, since that is already covered by '.'
360 # Don't add '.hglf' to m.files, since that is already covered by '.'
361 if m._files[i] == '.':
361 if m._files[i] == '.':
362 continue
362 continue
363 standin = lfutil.standin(m._files[i])
363 standin = lfutil.standin(m._files[i])
364 # If the "standin" is a directory, append instead of replace to
364 # If the "standin" is a directory, append instead of replace to
365 # support naming a directory on the command line with only
365 # support naming a directory on the command line with only
366 # largefiles. The original directory is kept to support normal
366 # largefiles. The original directory is kept to support normal
367 # files.
367 # files.
368 if standin in repo[ctx.node()]:
368 if standin in repo[ctx.node()]:
369 m._files[i] = standin
369 m._files[i] = standin
370 elif m._files[i] not in repo[ctx.node()] \
370 elif m._files[i] not in repo[ctx.node()] \
371 and repo.wvfs.isdir(standin):
371 and repo.wvfs.isdir(standin):
372 m._files.append(standin)
372 m._files.append(standin)
373
373
374 m._fileroots = set(m._files)
374 m._fileroots = set(m._files)
375 m._always = False
375 m._always = False
376 origmatchfn = m.matchfn
376 origmatchfn = m.matchfn
377 def lfmatchfn(f):
377 def lfmatchfn(f):
378 lf = lfutil.splitstandin(f)
378 lf = lfutil.splitstandin(f)
379 if lf is not None and origmatchfn(lf):
379 if lf is not None and origmatchfn(lf):
380 return True
380 return True
381 r = origmatchfn(f)
381 r = origmatchfn(f)
382 return r
382 return r
383 m.matchfn = lfmatchfn
383 m.matchfn = lfmatchfn
384
384
385 ui.debug('updated patterns: %s\n' % sorted(pats))
385 ui.debug('updated patterns: %s\n' % sorted(pats))
386 return m, pats
386 return m, pats
387
387
388 # For hg log --patch, the match object is used in two different senses:
388 # For hg log --patch, the match object is used in two different senses:
389 # (1) to determine what revisions should be printed out, and
389 # (1) to determine what revisions should be printed out, and
390 # (2) to determine what files to print out diffs for.
390 # (2) to determine what files to print out diffs for.
391 # The magic matchandpats override should be used for case (1) but not for
391 # The magic matchandpats override should be used for case (1) but not for
392 # case (2).
392 # case (2).
393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
393 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
394 wctx = repo[None]
394 wctx = repo[None]
395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
395 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
396 return lambda rev: match
396 return lambda rev: match
397
397
398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
398 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
399 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
400 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
401
401
402 try:
402 try:
403 return orig(ui, repo, *pats, **opts)
403 return orig(ui, repo, *pats, **opts)
404 finally:
404 finally:
405 restorematchandpatsfn()
405 restorematchandpatsfn()
406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
406 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
407
407
408 def overrideverify(orig, ui, repo, *pats, **opts):
408 def overrideverify(orig, ui, repo, *pats, **opts):
409 large = opts.pop('large', False)
409 large = opts.pop('large', False)
410 all = opts.pop('lfa', False)
410 all = opts.pop('lfa', False)
411 contents = opts.pop('lfc', False)
411 contents = opts.pop('lfc', False)
412
412
413 result = orig(ui, repo, *pats, **opts)
413 result = orig(ui, repo, *pats, **opts)
414 if large or all or contents:
414 if large or all or contents:
415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
415 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
416 return result
416 return result
417
417
418 def overridedebugstate(orig, ui, repo, *pats, **opts):
418 def overridedebugstate(orig, ui, repo, *pats, **opts):
419 large = opts.pop('large', False)
419 large = opts.pop('large', False)
420 if large:
420 if large:
421 class fakerepo(object):
421 class fakerepo(object):
422 dirstate = lfutil.openlfdirstate(ui, repo)
422 dirstate = lfutil.openlfdirstate(ui, repo)
423 orig(ui, fakerepo, *pats, **opts)
423 orig(ui, fakerepo, *pats, **opts)
424 else:
424 else:
425 orig(ui, repo, *pats, **opts)
425 orig(ui, repo, *pats, **opts)
426
426
427 # Before starting the manifest merge, merge.updates will call
427 # Before starting the manifest merge, merge.updates will call
428 # _checkunknownfile to check if there are any files in the merged-in
428 # _checkunknownfile to check if there are any files in the merged-in
429 # changeset that collide with unknown files in the working copy.
429 # changeset that collide with unknown files in the working copy.
430 #
430 #
431 # The largefiles are seen as unknown, so this prevents us from merging
431 # The largefiles are seen as unknown, so this prevents us from merging
432 # in a file 'foo' if we already have a largefile with the same name.
432 # in a file 'foo' if we already have a largefile with the same name.
433 #
433 #
434 # The overridden function filters the unknown files by removing any
434 # The overridden function filters the unknown files by removing any
435 # largefiles. This makes the merge proceed and we can then handle this
435 # largefiles. This makes the merge proceed and we can then handle this
436 # case further in the overridden calculateupdates function below.
436 # case further in the overridden calculateupdates function below.
437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
437 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
438 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
439 return False
439 return False
440 return origfn(repo, wctx, mctx, f, f2)
440 return origfn(repo, wctx, mctx, f, f2)
441
441
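# Illustrative sketch (editorial aside, not part of this changeset): the
# effect of the overridecheckunknownfile() wrapper above. If the working
# context already tracks the standin for a file, the file is a largefile and
# must not be reported as a colliding unknown file; otherwise the decision is
# left to the original check. Names below are hypothetical.
def _iscollision_sketch(tracked_standins, fname, shortnameslash='.hglf/'):
    # 'tracked_standins' stands in for membership tests against wctx
    if shortnameslash + fname in tracked_standins:
        return False          # largefile: let the merge proceed
    return True               # defer to the original unknown-file check

# _iscollision_sketch({'.hglf/video.mov'}, 'video.mov') -> False
# _iscollision_sketch({'.hglf/video.mov'}, 'notes.txt') -> True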
442 # The manifest merge handles conflicts on the manifest level. We want
442 # The manifest merge handles conflicts on the manifest level. We want
443 # to handle changes in largefile-ness of files at this level too.
443 # to handle changes in largefile-ness of files at this level too.
444 #
444 #
445 # The strategy is to run the original calculateupdates and then process
445 # The strategy is to run the original calculateupdates and then process
446 # the action list it outputs. There are two cases we need to deal with:
446 # the action list it outputs. There are two cases we need to deal with:
447 #
447 #
448 # 1. Normal file in p1, largefile in p2. Here the largefile is
448 # 1. Normal file in p1, largefile in p2. Here the largefile is
449 # detected via its standin file, which will enter the working copy
449 # detected via its standin file, which will enter the working copy
450 # with a "get" action. It is not "merge" since the standin is all
450 # with a "get" action. It is not "merge" since the standin is all
451 # Mercurial is concerned with at this level -- the link to the
451 # Mercurial is concerned with at this level -- the link to the
452 # existing normal file is not relevant here.
452 # existing normal file is not relevant here.
453 #
453 #
454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
454 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
455 # since the largefile will be present in the working copy and
455 # since the largefile will be present in the working copy and
456 # different from the normal file in p2. Mercurial therefore
456 # different from the normal file in p2. Mercurial therefore
457 # triggers a merge action.
457 # triggers a merge action.
458 #
458 #
459 # In both cases, we prompt the user and emit new actions to either
459 # In both cases, we prompt the user and emit new actions to either
460 # remove the standin (if the normal file was kept) or to remove the
460 # remove the standin (if the normal file was kept) or to remove the
461 # normal file and get the standin (if the largefile was kept). The
461 # normal file and get the standin (if the largefile was kept). The
462 # default prompt answer is to use the largefile version since it was
462 # default prompt answer is to use the largefile version since it was
463 # presumably changed on purpose.
463 # presumably changed on purpose.
464 #
464 #
465 # Finally, the merge.applyupdates function will then take care of
465 # Finally, the merge.applyupdates function will then take care of
466 # writing the files into the working copy and lfcommands.updatelfiles
466 # writing the files into the working copy and lfcommands.updatelfiles
467 # will update the largefiles.
467 # will update the largefiles.
468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
468 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
469 acceptremote, *args, **kwargs):
469 acceptremote, *args, **kwargs):
470 overwrite = force and not branchmerge
470 overwrite = force and not branchmerge
471 actions, diverge, renamedelete = origfn(
471 actions, diverge, renamedelete = origfn(
472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
472 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
473
473
474 if overwrite:
474 if overwrite:
475 return actions, diverge, renamedelete
475 return actions, diverge, renamedelete
476
476
477 # Convert to dictionary with filename as key and action as value.
477 # Convert to dictionary with filename as key and action as value.
478 lfiles = set()
478 lfiles = set()
479 for f in actions:
479 for f in actions:
480 splitstandin = lfutil.splitstandin(f)
480 splitstandin = lfutil.splitstandin(f)
481 if splitstandin in p1:
481 if splitstandin in p1:
482 lfiles.add(splitstandin)
482 lfiles.add(splitstandin)
483 elif lfutil.standin(f) in p1:
483 elif lfutil.standin(f) in p1:
484 lfiles.add(f)
484 lfiles.add(f)
485
485
486 for lfile in sorted(lfiles):
486 for lfile in sorted(lfiles):
487 standin = lfutil.standin(lfile)
487 standin = lfutil.standin(lfile)
488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
488 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
489 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
490 if sm in ('g', 'dc') and lm != 'r':
490 if sm in ('g', 'dc') and lm != 'r':
491 if sm == 'dc':
491 if sm == 'dc':
492 f1, f2, fa, move, anc = sargs
492 f1, f2, fa, move, anc = sargs
493 sargs = (p2[f2].flags(), False)
493 sargs = (p2[f2].flags(), False)
494 # Case 1: normal file in the working copy, largefile in
494 # Case 1: normal file in the working copy, largefile in
495 # the second parent
495 # the second parent
496 usermsg = _('remote turned local normal file %s into a largefile\n'
496 usermsg = _('remote turned local normal file %s into a largefile\n'
497 'use (l)argefile or keep (n)ormal file?'
497 'use (l)argefile or keep (n)ormal file?'
498 '$$ &Largefile $$ &Normal file') % lfile
498 '$$ &Largefile $$ &Normal file') % lfile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
500 actions[lfile] = ('r', None, 'replaced by standin')
500 actions[lfile] = ('r', None, 'replaced by standin')
501 actions[standin] = ('g', sargs, 'replaces standin')
501 actions[standin] = ('g', sargs, 'replaces standin')
502 else: # keep local normal file
502 else: # keep local normal file
503 actions[lfile] = ('k', None, 'replaces standin')
503 actions[lfile] = ('k', None, 'replaces standin')
504 if branchmerge:
504 if branchmerge:
505 actions[standin] = ('k', None, 'replaced by non-standin')
505 actions[standin] = ('k', None, 'replaced by non-standin')
506 else:
506 else:
507 actions[standin] = ('r', None, 'replaced by non-standin')
507 actions[standin] = ('r', None, 'replaced by non-standin')
508 elif lm in ('g', 'dc') and sm != 'r':
508 elif lm in ('g', 'dc') and sm != 'r':
509 if lm == 'dc':
509 if lm == 'dc':
510 f1, f2, fa, move, anc = largs
510 f1, f2, fa, move, anc = largs
511 largs = (p2[f2].flags(), False)
511 largs = (p2[f2].flags(), False)
512 # Case 2: largefile in the working copy, normal file in
512 # Case 2: largefile in the working copy, normal file in
513 # the second parent
513 # the second parent
514 usermsg = _('remote turned local largefile %s into a normal file\n'
514 usermsg = _('remote turned local largefile %s into a normal file\n'
515 'keep (l)argefile or use (n)ormal file?'
515 'keep (l)argefile or use (n)ormal file?'
516 '$$ &Largefile $$ &Normal file') % lfile
516 '$$ &Largefile $$ &Normal file') % lfile
517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
517 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
518 if branchmerge:
518 if branchmerge:
519 # largefile can be restored from standin safely
519 # largefile can be restored from standin safely
520 actions[lfile] = ('k', None, 'replaced by standin')
520 actions[lfile] = ('k', None, 'replaced by standin')
521 actions[standin] = ('k', None, 'replaces standin')
521 actions[standin] = ('k', None, 'replaces standin')
522 else:
522 else:
523 # "lfile" should be marked as "removed" without
523 # "lfile" should be marked as "removed" without
524 # removal of itself
524 # removal of itself
525 actions[lfile] = ('lfmr', None,
525 actions[lfile] = ('lfmr', None,
526 'forget non-standin largefile')
526 'forget non-standin largefile')
527
527
528 # linear-merge should treat this largefile as 're-added'
528 # linear-merge should treat this largefile as 're-added'
529 actions[standin] = ('a', None, 'keep standin')
529 actions[standin] = ('a', None, 'keep standin')
530 else: # pick remote normal file
530 else: # pick remote normal file
531 actions[lfile] = ('g', largs, 'replaces standin')
531 actions[lfile] = ('g', largs, 'replaces standin')
532 actions[standin] = ('r', None, 'replaced by non-standin')
532 actions[standin] = ('r', None, 'replaced by non-standin')
533
533
534 return actions, diverge, renamedelete
534 return actions, diverge, renamedelete
535
535
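# Illustrative sketch (editorial aside, not part of this changeset): the
# action rewrite for case 1 above (normal file locally, largefile remotely),
# over a plain dict of {filename: (action, args, message)}. Choice 0 keeps the
# remote largefile, choice 1 keeps the local normal file. The helper and its
# parameters are hypothetical simplifications of the code above.
def _rewrite_case1_sketch(actions, lfile, standin, sargs, choice, branchmerge):
    if choice == 0:            # use the remote largefile
        actions[lfile] = ('r', None, 'replaced by standin')
        actions[standin] = ('g', sargs, 'replaces standin')
    else:                      # keep the local normal file
        actions[lfile] = ('k', None, 'replaces standin')
        keep = 'k' if branchmerge else 'r'
        actions[standin] = (keep, None, 'replaced by non-standin')
    return actions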
536 def mergerecordupdates(orig, repo, actions, branchmerge):
536 def mergerecordupdates(orig, repo, actions, branchmerge):
537 if 'lfmr' in actions:
537 if 'lfmr' in actions:
538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
538 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
539 for lfile, args, msg in actions['lfmr']:
539 for lfile, args, msg in actions['lfmr']:
540 # this should be executed before 'orig', to execute 'remove'
540 # this should be executed before 'orig', to execute 'remove'
541 # before all other actions
541 # before all other actions
542 repo.dirstate.remove(lfile)
542 repo.dirstate.remove(lfile)
543 # make sure lfile doesn't get synclfdirstate'd as normal
543 # make sure lfile doesn't get synclfdirstate'd as normal
544 lfdirstate.add(lfile)
544 lfdirstate.add(lfile)
545 lfdirstate.write()
545 lfdirstate.write()
546
546
547 return orig(repo, actions, branchmerge)
547 return orig(repo, actions, branchmerge)
548
548
549
550 # Override filemerge to prompt the user about how they wish to merge
549 # Override filemerge to prompt the user about how they wish to merge
551 # largefiles. This will handle identical edits without prompting the user.
550 # largefiles. This will handle identical edits without prompting the user.
552 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
551 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
553 labels=None):
552 labels=None):
554 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
553 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
555 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
554 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
556 labels=labels)
555 labels=labels)
557
556
558 ahash = fca.data().strip().lower()
557 ahash = fca.data().strip().lower()
559 dhash = fcd.data().strip().lower()
558 dhash = fcd.data().strip().lower()
560 ohash = fco.data().strip().lower()
559 ohash = fco.data().strip().lower()
561 if (ohash != ahash and
560 if (ohash != ahash and
562 ohash != dhash and
561 ohash != dhash and
563 (dhash == ahash or
562 (dhash == ahash or
564 repo.ui.promptchoice(
563 repo.ui.promptchoice(
565 _('largefile %s has a merge conflict\nancestor was %s\n'
564 _('largefile %s has a merge conflict\nancestor was %s\n'
566 'keep (l)ocal %s or\ntake (o)ther %s?'
565 'keep (l)ocal %s or\ntake (o)ther %s?'
567 '$$ &Local $$ &Other') %
566 '$$ &Local $$ &Other') %
568 (lfutil.splitstandin(orig), ahash, dhash, ohash),
567 (lfutil.splitstandin(orig), ahash, dhash, ohash),
569 0) == 1)):
568 0) == 1)):
570 repo.wwrite(fcd.path(), fco.data(), fco.flags())
569 repo.wwrite(fcd.path(), fco.data(), fco.flags())
571 return True, 0, False
570 return True, 0, False
572
571
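# Illustrative sketch (editorial aside, not part of this changeset): the
# decision made by overridefilemerge() above, expressed over the three standin
# hashes. 'None' here means "prompt the user for which side to keep"; the
# helper name is hypothetical.
def _pickside_sketch(ahash, dhash, ohash):
    # ahash/dhash/ohash: ancestor, local (destination) and other hashes
    if ohash == ahash or ohash == dhash:
        return 'local'         # other side unchanged or identical: keep local
    if dhash == ahash:
        return 'other'         # only the other side changed: take it
    return None                # both sides changed differently: prompt

# _pickside_sketch('aaa', 'bbb', 'aaa') -> 'local'
# _pickside_sketch('aaa', 'aaa', 'ccc') -> 'other'
# _pickside_sketch('aaa', 'bbb', 'ccc') -> None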
573 def copiespathcopies(orig, ctx1, ctx2, match=None):
572 def copiespathcopies(orig, ctx1, ctx2, match=None):
574 copies = orig(ctx1, ctx2, match=match)
573 copies = orig(ctx1, ctx2, match=match)
575 updated = {}
574 updated = {}
576
575
577 for k, v in copies.iteritems():
576 for k, v in copies.iteritems():
578 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
577 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
579
578
580 return updated
579 return updated
581
580
582 # Copy first changes the matchers to match standins instead of
581 # Copy first changes the matchers to match standins instead of
583 # largefiles. Then it overrides util.copyfile; in that function it
582 # largefiles. Then it overrides util.copyfile; in that function it
584 # checks if the destination largefile already exists. It also keeps a
583 # checks if the destination largefile already exists. It also keeps a
585 # list of copied files so that the largefiles can be copied and the
584 # list of copied files so that the largefiles can be copied and the
586 # dirstate updated.
585 # dirstate updated.
587 def overridecopy(orig, ui, repo, pats, opts, rename=False):
586 def overridecopy(orig, ui, repo, pats, opts, rename=False):
588 # doesn't remove largefile on rename
587 # doesn't remove largefile on rename
589 if len(pats) < 2:
588 if len(pats) < 2:
590 # this isn't legal, let the original function deal with it
589 # this isn't legal, let the original function deal with it
591 return orig(ui, repo, pats, opts, rename)
590 return orig(ui, repo, pats, opts, rename)
592
591
593 # This could copy both lfiles and normal files in one command,
592 # This could copy both lfiles and normal files in one command,
594 # but we don't want to do that. First replace their matcher to
593 # but we don't want to do that. First replace their matcher to
595 # only match normal files and run it, then replace it to just
594 # only match normal files and run it, then replace it to just
596 # match largefiles and run it again.
595 # match largefiles and run it again.
597 nonormalfiles = False
596 nonormalfiles = False
598 nolfiles = False
597 nolfiles = False
599 installnormalfilesmatchfn(repo[None].manifest())
598 installnormalfilesmatchfn(repo[None].manifest())
600 try:
599 try:
601 result = orig(ui, repo, pats, opts, rename)
600 result = orig(ui, repo, pats, opts, rename)
602 except error.Abort as e:
601 except error.Abort as e:
603 if str(e) != _('no files to copy'):
602 if str(e) != _('no files to copy'):
604 raise e
603 raise e
605 else:
604 else:
606 nonormalfiles = True
605 nonormalfiles = True
607 result = 0
606 result = 0
608 finally:
607 finally:
609 restorematchfn()
608 restorematchfn()
610
609
611 # The first rename can cause our current working directory to be removed.
610 # The first rename can cause our current working directory to be removed.
612 # In that case there is nothing left to copy/rename so just quit.
611 # In that case there is nothing left to copy/rename so just quit.
613 try:
612 try:
614 repo.getcwd()
613 repo.getcwd()
615 except OSError:
614 except OSError:
616 return result
615 return result
617
616
618 def makestandin(relpath):
617 def makestandin(relpath):
619 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
618 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
620 return repo.wvfs.join(lfutil.standin(path))
619 return repo.wvfs.join(lfutil.standin(path))
621
620
622 fullpats = scmutil.expandpats(pats)
621 fullpats = scmutil.expandpats(pats)
623 dest = fullpats[-1]
622 dest = fullpats[-1]
624
623
625 if os.path.isdir(dest):
624 if os.path.isdir(dest):
626 if not os.path.isdir(makestandin(dest)):
625 if not os.path.isdir(makestandin(dest)):
627 os.makedirs(makestandin(dest))
626 os.makedirs(makestandin(dest))
628
627
629 try:
628 try:
630 # When we call orig below it creates the standins but we don't add
629 # When we call orig below it creates the standins but we don't add
631 # them to the dir state until later so lock during that time.
630 # them to the dir state until later so lock during that time.
632 wlock = repo.wlock()
631 wlock = repo.wlock()
633
632
634 manifest = repo[None].manifest()
633 manifest = repo[None].manifest()
635 def overridematch(ctx, pats=(), opts=None, globbed=False,
634 def overridematch(ctx, pats=(), opts=None, globbed=False,
636 default='relpath', badfn=None):
635 default='relpath', badfn=None):
637 if opts is None:
636 if opts is None:
638 opts = {}
637 opts = {}
639 newpats = []
638 newpats = []
640 # The patterns were previously mangled to add the standin
639 # The patterns were previously mangled to add the standin
641 # directory; we need to remove that now
640 # directory; we need to remove that now
642 for pat in pats:
641 for pat in pats:
643 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
642 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
644 newpats.append(pat.replace(lfutil.shortname, ''))
643 newpats.append(pat.replace(lfutil.shortname, ''))
645 else:
644 else:
646 newpats.append(pat)
645 newpats.append(pat)
647 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
646 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
648 m = copy.copy(match)
647 m = copy.copy(match)
649 lfile = lambda f: lfutil.standin(f) in manifest
648 lfile = lambda f: lfutil.standin(f) in manifest
650 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
649 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
651 m._fileroots = set(m._files)
650 m._fileroots = set(m._files)
652 origmatchfn = m.matchfn
651 origmatchfn = m.matchfn
653 m.matchfn = lambda f: (lfutil.isstandin(f) and
652 m.matchfn = lambda f: (lfutil.isstandin(f) and
654 (f in manifest) and
653 (f in manifest) and
655 origmatchfn(lfutil.splitstandin(f)) or
654 origmatchfn(lfutil.splitstandin(f)) or
656 None)
655 None)
657 return m
656 return m
658 oldmatch = installmatchfn(overridematch)
657 oldmatch = installmatchfn(overridematch)
659 listpats = []
658 listpats = []
660 for pat in pats:
659 for pat in pats:
661 if matchmod.patkind(pat) is not None:
660 if matchmod.patkind(pat) is not None:
662 listpats.append(pat)
661 listpats.append(pat)
663 else:
662 else:
664 listpats.append(makestandin(pat))
663 listpats.append(makestandin(pat))
665
664
666 try:
665 try:
667 origcopyfile = util.copyfile
666 origcopyfile = util.copyfile
668 copiedfiles = []
667 copiedfiles = []
669 def overridecopyfile(src, dest):
668 def overridecopyfile(src, dest):
670 if (lfutil.shortname in src and
669 if (lfutil.shortname in src and
671 dest.startswith(repo.wjoin(lfutil.shortname))):
670 dest.startswith(repo.wjoin(lfutil.shortname))):
672 destlfile = dest.replace(lfutil.shortname, '')
671 destlfile = dest.replace(lfutil.shortname, '')
673 if not opts['force'] and os.path.exists(destlfile):
672 if not opts['force'] and os.path.exists(destlfile):
674 raise IOError('',
673 raise IOError('',
675 _('destination largefile already exists'))
674 _('destination largefile already exists'))
676 copiedfiles.append((src, dest))
675 copiedfiles.append((src, dest))
677 origcopyfile(src, dest)
676 origcopyfile(src, dest)
678
677
679 util.copyfile = overridecopyfile
678 util.copyfile = overridecopyfile
680 result += orig(ui, repo, listpats, opts, rename)
679 result += orig(ui, repo, listpats, opts, rename)
681 finally:
680 finally:
682 util.copyfile = origcopyfile
681 util.copyfile = origcopyfile
683
682
684 lfdirstate = lfutil.openlfdirstate(ui, repo)
683 lfdirstate = lfutil.openlfdirstate(ui, repo)
685 for (src, dest) in copiedfiles:
684 for (src, dest) in copiedfiles:
686 if (lfutil.shortname in src and
685 if (lfutil.shortname in src and
687 dest.startswith(repo.wjoin(lfutil.shortname))):
686 dest.startswith(repo.wjoin(lfutil.shortname))):
688 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
687 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
689 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
688 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
690 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
689 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
691 if not os.path.isdir(destlfiledir):
690 if not os.path.isdir(destlfiledir):
692 os.makedirs(destlfiledir)
691 os.makedirs(destlfiledir)
693 if rename:
692 if rename:
694 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
693 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
695
694
696 # The file is gone, but this deletes any empty parent
695 # The file is gone, but this deletes any empty parent
697 # directories as a side-effect.
696 # directories as a side-effect.
698 util.unlinkpath(repo.wjoin(srclfile), True)
697 util.unlinkpath(repo.wjoin(srclfile), True)
699 lfdirstate.remove(srclfile)
698 lfdirstate.remove(srclfile)
700 else:
699 else:
701 util.copyfile(repo.wjoin(srclfile),
700 util.copyfile(repo.wjoin(srclfile),
702 repo.wjoin(destlfile))
701 repo.wjoin(destlfile))
703
702
704 lfdirstate.add(destlfile)
703 lfdirstate.add(destlfile)
705 lfdirstate.write()
704 lfdirstate.write()
706 except error.Abort as e:
705 except error.Abort as e:
707 if str(e) != _('no files to copy'):
706 if str(e) != _('no files to copy'):
708 raise e
707 raise e
709 else:
708 else:
710 nolfiles = True
709 nolfiles = True
711 finally:
710 finally:
712 restorematchfn()
711 restorematchfn()
713 wlock.release()
712 wlock.release()
714
713
715 if nolfiles and nonormalfiles:
714 if nolfiles and nonormalfiles:
716 raise error.Abort(_('no files to copy'))
715 raise error.Abort(_('no files to copy'))
717
716
718 return result
717 return result
719
718
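# Illustrative sketch (editorial aside, not part of this changeset): the
# two-pass shape of overridecopy() above -- run the original command once with
# a matcher restricted to normal files, then again with one restricted to
# largefile standins -- written against hypothetical install/restore helpers
# rather than the real module-level ones.
def _twopass_copy_sketch(run_orig, install_matchfn, restore_matchfn,
                         normal_matchfn, largefile_matchfn):
    results = []
    for matchfn in (normal_matchfn, largefile_matchfn):
        install_matchfn(matchfn)
        try:
            results.append(run_orig())
        finally:
            restore_matchfn()
    return results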
720 # When the user calls revert, we have to be careful to not revert any
719 # When the user calls revert, we have to be careful to not revert any
721 # changes to other largefiles accidentally. This means we have to keep
720 # changes to other largefiles accidentally. This means we have to keep
722 # track of the largefiles that are being reverted so we only pull down
721 # track of the largefiles that are being reverted so we only pull down
723 # the necessary largefiles.
722 # the necessary largefiles.
724 #
723 #
725 # Standins are only updated (to match the hash of largefiles) before
724 # Standins are only updated (to match the hash of largefiles) before
726 # commits. Update the standins then run the original revert, changing
725 # commits. Update the standins then run the original revert, changing
727 # the matcher to hit standins instead of largefiles. Based on the
726 # the matcher to hit standins instead of largefiles. Based on the
728 # resulting standins update the largefiles.
727 # resulting standins update the largefiles.
729 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
728 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
730 # Because we put the standins in a bad state (by updating them)
729 # Because we put the standins in a bad state (by updating them)
731 # and then return them to a correct state we need to lock to
730 # and then return them to a correct state we need to lock to
732 # prevent others from changing them in their incorrect state.
731 # prevent others from changing them in their incorrect state.
733 with repo.wlock():
732 with repo.wlock():
734 lfdirstate = lfutil.openlfdirstate(ui, repo)
733 lfdirstate = lfutil.openlfdirstate(ui, repo)
735 s = lfutil.lfdirstatestatus(lfdirstate, repo)
734 s = lfutil.lfdirstatestatus(lfdirstate, repo)
736 lfdirstate.write()
735 lfdirstate.write()
737 for lfile in s.modified:
736 for lfile in s.modified:
738 lfutil.updatestandin(repo, lfutil.standin(lfile))
737 lfutil.updatestandin(repo, lfutil.standin(lfile))
739 for lfile in s.deleted:
738 for lfile in s.deleted:
740 if (repo.wvfs.exists(lfutil.standin(lfile))):
739 if (repo.wvfs.exists(lfutil.standin(lfile))):
741 repo.wvfs.unlink(lfutil.standin(lfile))
740 repo.wvfs.unlink(lfutil.standin(lfile))
742
741
743 oldstandins = lfutil.getstandinsstate(repo)
742 oldstandins = lfutil.getstandinsstate(repo)
744
743
745 def overridematch(mctx, pats=(), opts=None, globbed=False,
744 def overridematch(mctx, pats=(), opts=None, globbed=False,
746 default='relpath', badfn=None):
745 default='relpath', badfn=None):
747 if opts is None:
746 if opts is None:
748 opts = {}
747 opts = {}
749 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
748 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
750 m = copy.copy(match)
749 m = copy.copy(match)
751
750
752 # revert supports recursing into subrepos, and though largefiles
751 # revert supports recursing into subrepos, and though largefiles
753 # currently doesn't work correctly in that case, this match is
752 # currently doesn't work correctly in that case, this match is
754 # called, so the lfdirstate above may not be the correct one for
753 # called, so the lfdirstate above may not be the correct one for
755 # this invocation of match.
754 # this invocation of match.
756 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
755 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
757 False)
756 False)
758
757
759 def tostandin(f):
758 def tostandin(f):
760 standin = lfutil.standin(f)
759 standin = lfutil.standin(f)
761 if standin in ctx or standin in mctx:
760 if standin in ctx or standin in mctx:
762 return standin
761 return standin
763 elif standin in repo[None] or lfdirstate[f] == 'r':
762 elif standin in repo[None] or lfdirstate[f] == 'r':
764 return None
763 return None
765 return f
764 return f
766 m._files = [tostandin(f) for f in m._files]
765 m._files = [tostandin(f) for f in m._files]
767 m._files = [f for f in m._files if f is not None]
766 m._files = [f for f in m._files if f is not None]
768 m._fileroots = set(m._files)
767 m._fileroots = set(m._files)
769 origmatchfn = m.matchfn
768 origmatchfn = m.matchfn
770 def matchfn(f):
769 def matchfn(f):
771 if lfutil.isstandin(f):
770 if lfutil.isstandin(f):
772 return (origmatchfn(lfutil.splitstandin(f)) and
771 return (origmatchfn(lfutil.splitstandin(f)) and
773 (f in ctx or f in mctx))
772 (f in ctx or f in mctx))
774 return origmatchfn(f)
773 return origmatchfn(f)
775 m.matchfn = matchfn
774 m.matchfn = matchfn
776 return m
775 return m
777 oldmatch = installmatchfn(overridematch)
776 oldmatch = installmatchfn(overridematch)
778 try:
777 try:
779 orig(ui, repo, ctx, parents, *pats, **opts)
778 orig(ui, repo, ctx, parents, *pats, **opts)
780 finally:
779 finally:
781 restorematchfn()
780 restorematchfn()
782
781
783 newstandins = lfutil.getstandinsstate(repo)
782 newstandins = lfutil.getstandinsstate(repo)
784 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
783 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
785 # lfdirstate should be 'normallookup'-ed for updated files,
784 # lfdirstate should be 'normallookup'-ed for updated files,
786 # because reverting doesn't touch dirstate for 'normal' files
785 # because reverting doesn't touch dirstate for 'normal' files
787 # when target revision is explicitly specified: in such case,
786 # when target revision is explicitly specified: in such case,
788 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
787 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
789 # of target (standin) file.
788 # of target (standin) file.
790 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
789 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
791 normallookup=True)
790 normallookup=True)
792
791
793 # after pulling changesets, we need to take some extra care to get
792 # after pulling changesets, we need to take some extra care to get
794 # largefiles updated remotely
793 # largefiles updated remotely
795 def overridepull(orig, ui, repo, source=None, **opts):
794 def overridepull(orig, ui, repo, source=None, **opts):
796 revsprepull = len(repo)
795 revsprepull = len(repo)
797 if not source:
796 if not source:
798 source = 'default'
797 source = 'default'
799 repo.lfpullsource = source
798 repo.lfpullsource = source
800 result = orig(ui, repo, source, **opts)
799 result = orig(ui, repo, source, **opts)
801 revspostpull = len(repo)
800 revspostpull = len(repo)
802 lfrevs = opts.get('lfrev', [])
801 lfrevs = opts.get('lfrev', [])
803 if opts.get('all_largefiles'):
802 if opts.get('all_largefiles'):
804 lfrevs.append('pulled()')
803 lfrevs.append('pulled()')
805 if lfrevs and revspostpull > revsprepull:
804 if lfrevs and revspostpull > revsprepull:
806 numcached = 0
805 numcached = 0
807 repo.firstpulled = revsprepull # for pulled() revset expression
806 repo.firstpulled = revsprepull # for pulled() revset expression
808 try:
807 try:
809 for rev in scmutil.revrange(repo, lfrevs):
808 for rev in scmutil.revrange(repo, lfrevs):
810 ui.note(_('pulling largefiles for revision %s\n') % rev)
809 ui.note(_('pulling largefiles for revision %s\n') % rev)
811 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
810 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
812 numcached += len(cached)
811 numcached += len(cached)
813 finally:
812 finally:
814 del repo.firstpulled
813 del repo.firstpulled
815 ui.status(_("%d largefiles cached\n") % numcached)
814 ui.status(_("%d largefiles cached\n") % numcached)
816 return result
815 return result
817
816
818 def overridepush(orig, ui, repo, *args, **kwargs):
817 def overridepush(orig, ui, repo, *args, **kwargs):
819 """Override push command and store --lfrev parameters in opargs"""
818 """Override push command and store --lfrev parameters in opargs"""
820 lfrevs = kwargs.pop('lfrev', None)
819 lfrevs = kwargs.pop('lfrev', None)
821 if lfrevs:
820 if lfrevs:
822 opargs = kwargs.setdefault('opargs', {})
821 opargs = kwargs.setdefault('opargs', {})
823 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
822 opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
824 return orig(ui, repo, *args, **kwargs)
823 return orig(ui, repo, *args, **kwargs)
825
824
826 def exchangepushoperation(orig, *args, **kwargs):
825 def exchangepushoperation(orig, *args, **kwargs):
827 """Override pushoperation constructor and store lfrevs parameter"""
826 """Override pushoperation constructor and store lfrevs parameter"""
828 lfrevs = kwargs.pop('lfrevs', None)
827 lfrevs = kwargs.pop('lfrevs', None)
829 pushop = orig(*args, **kwargs)
828 pushop = orig(*args, **kwargs)
830 pushop.lfrevs = lfrevs
829 pushop.lfrevs = lfrevs
831 return pushop
830 return pushop
832
831
833 revsetpredicate = registrar.revsetpredicate()
832 revsetpredicate = registrar.revsetpredicate()
834
833
835 @revsetpredicate('pulled()')
834 @revsetpredicate('pulled()')
836 def pulledrevsetsymbol(repo, subset, x):
835 def pulledrevsetsymbol(repo, subset, x):
837 """Changesets that just has been pulled.
836 """Changesets that just has been pulled.
838
837
839 Only available with largefiles from pull --lfrev expressions.
838 Only available with largefiles from pull --lfrev expressions.
840
839
841 .. container:: verbose
840 .. container:: verbose
842
841
843 Some examples:
842 Some examples:
844
843
845 - pull largefiles for all new changesets::
844 - pull largefiles for all new changesets::
846
845
847 hg pull --lfrev "pulled()"
846 hg pull --lfrev "pulled()"
848
847
849 - pull largefiles for all new branch heads::
848 - pull largefiles for all new branch heads::
850
849
851 hg pull --lfrev "head(pulled()) and not closed()"
850 hg pull --lfrev "head(pulled()) and not closed()"
852
851
853 """
852 """
854
853
855 try:
854 try:
856 firstpulled = repo.firstpulled
855 firstpulled = repo.firstpulled
857 except AttributeError:
856 except AttributeError:
858 raise error.Abort(_("pulled() only available in --lfrev"))
857 raise error.Abort(_("pulled() only available in --lfrev"))
859 return revset.baseset([r for r in subset if r >= firstpulled])
858 return revset.baseset([r for r in subset if r >= firstpulled])
860
859
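# Illustrative sketch (editorial aside, not part of this changeset): what the
# pulled() predicate above selects. repo.firstpulled records how many
# revisions existed before the pull, so every revision numbered at or above it
# was just pulled. The helper below works over plain revision numbers.
def _pulled_sketch(subset, firstpulled):
    return [r for r in subset if r >= firstpulled]

# With 100 revisions before the pull and revisions 98-104 in the subset:
# _pulled_sketch(range(98, 105), 100) -> [100, 101, 102, 103, 104]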
861 def overrideclone(orig, ui, source, dest=None, **opts):
860 def overrideclone(orig, ui, source, dest=None, **opts):
862 d = dest
861 d = dest
863 if d is None:
862 if d is None:
864 d = hg.defaultdest(source)
863 d = hg.defaultdest(source)
865 if opts.get('all_largefiles') and not hg.islocal(d):
864 if opts.get('all_largefiles') and not hg.islocal(d):
866 raise error.Abort(_(
865 raise error.Abort(_(
867 '--all-largefiles is incompatible with non-local destination %s') %
866 '--all-largefiles is incompatible with non-local destination %s') %
868 d)
867 d)
869
868
870 return orig(ui, source, dest, **opts)
869 return orig(ui, source, dest, **opts)
871
870
872 def hgclone(orig, ui, opts, *args, **kwargs):
871 def hgclone(orig, ui, opts, *args, **kwargs):
873 result = orig(ui, opts, *args, **kwargs)
872 result = orig(ui, opts, *args, **kwargs)
874
873
875 if result is not None:
874 if result is not None:
876 sourcerepo, destrepo = result
875 sourcerepo, destrepo = result
877 repo = destrepo.local()
876 repo = destrepo.local()
878
877
879 # When cloning to a remote repo (like through SSH), no repo is available
878 # When cloning to a remote repo (like through SSH), no repo is available
880 # from the peer. Therefore the largefiles can't be downloaded and the
879 # from the peer. Therefore the largefiles can't be downloaded and the
881 # hgrc can't be updated.
880 # hgrc can't be updated.
882 if not repo:
881 if not repo:
883 return result
882 return result
884
883
885 # If largefiles is required for this repo, permanently enable it locally
884 # If largefiles is required for this repo, permanently enable it locally
886 if 'largefiles' in repo.requirements:
885 if 'largefiles' in repo.requirements:
887 fp = repo.vfs('hgrc', 'a', text=True)
886 fp = repo.vfs('hgrc', 'a', text=True)
888 try:
887 try:
889 fp.write('\n[extensions]\nlargefiles=\n')
888 fp.write('\n[extensions]\nlargefiles=\n')
890 finally:
889 finally:
891 fp.close()
890 fp.close()
892
891
893 # Caching is implicitly limited to the 'rev' option, since the dest repo was
892 # Caching is implicitly limited to the 'rev' option, since the dest repo was
894 # truncated at that point. The user may expect a download count with
893 # truncated at that point. The user may expect a download count with
895 # this option, so attempt it whether or not this is a largefile repo.
894 # this option, so attempt it whether or not this is a largefile repo.
896 if opts.get('all_largefiles'):
895 if opts.get('all_largefiles'):
897 success, missing = lfcommands.downloadlfiles(ui, repo, None)
896 success, missing = lfcommands.downloadlfiles(ui, repo, None)
898
897
899 if missing != 0:
898 if missing != 0:
900 return None
899 return None
901
900
902 return result
901 return result
903
902
904 def overriderebase(orig, ui, repo, **opts):
903 def overriderebase(orig, ui, repo, **opts):
905 if not util.safehasattr(repo, '_largefilesenabled'):
904 if not util.safehasattr(repo, '_largefilesenabled'):
906 return orig(ui, repo, **opts)
905 return orig(ui, repo, **opts)
907
906
908 resuming = opts.get('continue')
907 resuming = opts.get('continue')
909 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
908 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
910 repo._lfstatuswriters.append(lambda *msg, **opts: None)
909 repo._lfstatuswriters.append(lambda *msg, **opts: None)
911 try:
910 try:
912 return orig(ui, repo, **opts)
911 return orig(ui, repo, **opts)
913 finally:
912 finally:
914 repo._lfstatuswriters.pop()
913 repo._lfstatuswriters.pop()
915 repo._lfcommithooks.pop()
914 repo._lfcommithooks.pop()
916
915
917 def overridearchivecmd(orig, ui, repo, dest, **opts):
916 def overridearchivecmd(orig, ui, repo, dest, **opts):
918 repo.unfiltered().lfstatus = True
917 repo.unfiltered().lfstatus = True
919
918
920 try:
919 try:
921 return orig(ui, repo.unfiltered(), dest, **opts)
920 return orig(ui, repo.unfiltered(), dest, **opts)
922 finally:
921 finally:
923 repo.unfiltered().lfstatus = False
922 repo.unfiltered().lfstatus = False
924
923
925 def hgwebarchive(orig, web, req, tmpl):
924 def hgwebarchive(orig, web, req, tmpl):
926 web.repo.lfstatus = True
925 web.repo.lfstatus = True
927
926
928 try:
927 try:
929 return orig(web, req, tmpl)
928 return orig(web, req, tmpl)
930 finally:
929 finally:
931 web.repo.lfstatus = False
930 web.repo.lfstatus = False
932
931
933 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
932 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
934 prefix='', mtime=None, subrepos=None):
933 prefix='', mtime=None, subrepos=None):
935 # For some reason setting repo.lfstatus in hgwebarchive only changes the
934 # For some reason setting repo.lfstatus in hgwebarchive only changes the
936 # unfiltered repo's attr, so check that as well.
935 # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if node is not None:
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            getdata = lambda: util.readfile(path)
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, matchfn)
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()

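# Editorial note, illustrative only (not part of the upstream module): the
# lfutil.findfile() call above resolves a largefile hash to a local path by
# consulting the repo store and then the user cache; the real helper also
# links cache hits into the store. Under that assumption, a minimal sketch of
# the lookup order (the helper name below is hypothetical):
#
#     def _findfile_sketch(repo, hash):
#         if lfutil.instore(repo, hash):
#             return lfutil.storepath(repo, hash)
#         elif lfutil.inusercache(repo.ui, hash):
#             return lfutil.usercachepath(repo.ui, hash)
#         return None
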
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_('uncommitted changes'))

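# Editorial note, illustrative only (not part of the upstream module): wrappers
# such as overridebailifchanged receive the wrapped function as 'orig' because
# they are installed with mercurial.extensions.wrapfunction() from the
# extension's setup code, which is not shown in this file. A minimal sketch of
# that wiring, assuming it runs in the extension's uisetup():
#
#     from mercurial import cmdutil, extensions
#
#     def uisetup(ui):
#         extensions.wrapfunction(cmdutil, 'bailifchanged',
#                                 overridebailifchanged)
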
def postcommitstatus(orig, repo, *args, **kwargs):
    repo.lfstatus = True
    try:
        return orig(repo, *args, **kwargs)
    finally:
        repo.lfstatus = False

def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on the 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)

def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')

def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))

def overridesummary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but leave the removal of the standin
    # to the original addremove. Monkey patching here makes sure we don't
    # remove the standin in the largefiles code, preventing a very confused
    # state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we only look up attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result

def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result

def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = storefactory.openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise error.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err

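# Editorial note, illustrative only (not part of the upstream module): the
# 'hash' read from a standin above is the SHA-1 hex digest of the largefile's
# content, which is the key used for the store and user-cache lookups. A rough
# sketch of that hashing, assuming plain hashlib rather than lfutil's helpers
# (the function name below is hypothetical):
#
#     import hashlib
#
#     def _samplehash(path):
#         h = hashlib.sha1()
#         with open(path, 'rb') as fp:
#             for chunk in iter(lambda: fp.read(128 * 1024), b''):
#                 h.update(chunk)
#         return h.hexdigest()
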
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                      repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result

def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
    if filelist:
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=False, normallookup=True)

    return result