##// END OF EJS Templates
largefiles: add lfile argument to updatestandin() for efficiency (API)...
FUJIWARA Katsunori -
r31659:0eec3611 default
parent child Browse files
Show More
@@ -1,667 +1,670
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import platform
15 import platform
16 import stat
16 import stat
17
17
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 node,
26 node,
27 pycompat,
27 pycompat,
28 scmutil,
28 scmutil,
29 util,
29 util,
30 vfs as vfsmod,
30 vfs as vfsmod,
31 )
31 )
32
32
# directory (relative to the repo root) that holds the standin files
shortname = '.hglf'
shortnameslash = shortname + '/'
# name used for the extension's store directory and its config section
longname = 'largefiles'

# -- Private worker functions ------------------------------------------
38
38
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum size for a file to be tracked as a largefile.

    The explicit command line value ``opt`` wins; otherwise, when
    ``assumelfiles`` is set, fall back to the ``largefiles.minsize``
    config value (defaulting to ``default``).  Aborts when the value is
    not numeric, or when no size could be determined at all.
    """
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
52
52
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy.

    Parent directories of dest are created as needed.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf:
            with util.atomictempfile(dest) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        # mirror the source's permission bits onto the copy (a hardlink
        # would have shared them automatically)
        os.chmod(dest, os.stat(src).st_mode)
65
65
def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    cachedir = _usercachedir(ui)
    return os.path.join(cachedir, hash)
72
72
def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    # an explicitly configured location always wins
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return configured
    # otherwise pick a per-platform default
    if pycompat.osname == 'nt':
        appdata = encoding.environ.get('LOCALAPPDATA',
                                       encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        # honour XDG_CACHE_HOME before falling back to ~/.cache
        xdgcache = encoding.environ.get('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    # a known platform, but no usable environment variable was found
    raise error.Abort(_('unknown %s usercache location') % longname)
98
98
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash exists in the
    user ("global") cache.'''
    return os.path.exists(usercachepath(ui, hash))
102
102
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # populate the repo store from the user cache (hardlink or copy)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
117
117
class largefilesdirstate(dirstate.dirstate):
    """dirstate subclass used to track largefiles.

    Every incoming path is normalized with unixpath() so the underlying
    dirstate always sees slash-separated, normpath'd names.
    """
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
140
140
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        # register every tracked largefile; normallookup marks it so the
        # next status check will re-examine it
        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
166
166
def lfdirstatestatus(lfdirstate, repo):
    '''Return the status of largefiles in the working directory.

    Files whose dirstate status is unsure are resolved by comparing the
    working copy's hash against the standin recorded in the first parent.
    '''
    pctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # no standin in the parent: treat as modified below
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            # record the file as clean so future status checks are cheap
            lfdirstate.normal(lfile)
    return s
183
183
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
195
195
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
199
199
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    # a shared repository keeps its primary store at the share source,
    # unless the caller explicitly asks for the local one
    useshared = not forcelocal and repo.shared()
    if useshared:
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
206
206
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # found only in the local (non-shared) store
        return storepath(repo, hash, True), True

    return (path, False)
223
223
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            # hash while copying so corruption is detected in one pass
            gothash = copyandhash(
                util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # cached data did not match its expected hash: discard the
        # partially-written working copy file and report failure
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True
247
247
def copytostore(repo, revorctx, file, uploaded=False):
    '''Copy the largefile for `file` into the store, keyed by the hash
    recorded in its standin at `revorctx`.  No-op when already stored.

    NOTE(review): `uploaded` is unused in this body -- presumably kept
    for interface compatibility; confirm against callers.
    '''
    wvfs = repo.wvfs
    hash = readstandin(repo, file, revorctx)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
258
258
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in this revision's manifest
        if realfile is None or filename not in ctx.manifest():
            continue
        copytostore(repo, ctx, realfile)
267
267
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at absolute path `file` into the store under `hash`,
    hardlinking from the user cache when it already has the content.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        # copy atomically so a crash never leaves a partial store file
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        # share the freshly stored file via the user cache as well
        linktousercache(repo, hash)
279
279
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    target = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), target)
285
285
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate each user pattern into the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
305
305
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its largefile must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
317
317
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    converted = util.pconvert(filename)
    return shortnameslash + converted
329
329
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename[:len(prefix)] == prefix
334
334
def splitstandin(filename):
    '''Return the largefile name for a standin path, or None when the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
344
344
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_('%s: file not found!') % lfile)
    abspath = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(abspath), getexecutable(abspath))
357
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # NOTE: raises if the standin does not exist at that node; callers
    # such as getstandinsstate() catch IOError around this call
    return repo[node][standin(filename)].data().strip()
359
362
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # standin content is the hex hash plus newline; mirror the largefile's
    # executable bit on the standin file itself
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
363
366
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash
    as a hex string.'''
    # hashlib.sha1('') seeded the hasher with a redundant empty string,
    # which additionally breaks on Python 3 (str is not accepted there);
    # an unseeded hasher is byte-for-byte equivalent.
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
372
375
def hashfile(file):
    '''Return the hex SHA-1 of the file's contents, or the empty string
    when the file does not exist.'''
    if os.path.exists(file):
        with open(file, 'rb') as fd:
            return hexsha1(fd)
    return ''
378
381
def getexecutable(filename):
    '''Return a truthy value iff the execute bit is set for user, group
    AND other on filename.'''
    mode = os.stat(filename).st_mode
    return (mode & stat.S_IXUSR
            and mode & stat.S_IXGRP
            and mode & stat.S_IXOTH)
384
387
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly the left side
    contributes a trailing '/' and stripping a single leading '/' from
    each right-hand component.'''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
397
400
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    # read in bounded chunks so arbitrarily large files fit in memory
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return h.hexdigest()
405
408
def httpsendfile(ui, filename):
    # thin wrapper: open filename in binary mode for sending over HTTP
    return httpconnection.httpsendfile(ui, filename, 'rb')
408
411
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
412
415
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    # fast path: the requirement is present and the store actually
    # contains at least one standin
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # fall back: any entry at all in the largefiles dirstate
    return any(openlfdirstate(repo.ui, repo, False))
420
423
class storeprotonotcapable(Exception):
    # carries the store types that could not be satisfied; presumably
    # inspected by the store-selection logic -- confirm against callers
    def __init__(self, storetypes):
        self.storetypes = storetypes
424
427
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked by
    the dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
436
439
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Sync the lfdirstate entry for lfile with the state of its standin
    in repo.dirstate, mapping each standin state onto the corresponding
    lfdirstate transition.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        # dirstate map entries: index 0 is the state char, index 3 the mtime
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
459
462
def markcommitted(orig, ctx, node):
    '''Wrapper for markcommitted: after delegating to orig, sync the
    lfdirstate for every standin touched by ctx and copy the revision's
    largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
488
491
def getlfilestoupdate(oldstandins, newstandins):
    '''Return largefile names whose (name, hash) standin entries differ
    between ``oldstandins`` and ``newstandins``.

    Each argument is a list of (lfile, hash) pairs; the result contains
    each changed name exactly once.
    '''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track names already emitted in a set: O(1) membership instead of
    # the O(n) "not in list" scan per entry
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
496
499
def getlfilestoupload(repo, missing, addfunc):
    '''Call ``addfunc(standin, hash)`` for every largefile standin that
    must be uploaded to cover the ``missing`` revisions.

    Merge revisions need extra care: files coming only from one parent
    are not listed in ``ctx.files()``, so both parent manifests are
    compared against the merge manifest explicitly.
    '''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        # look up the changectx with lfstatus disabled to avoid
        # largefile-aware status processing, restoring the flag after
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
528
531
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        # pass lfile along so updatestandin() need not
                        # re-derive it from the standin name
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
630
633
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        # True only for the first commit after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match
651
654
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,1460 +1,1460
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial import (
17 from mercurial import (
18 archival,
18 archival,
19 cmdutil,
19 cmdutil,
20 error,
20 error,
21 hg,
21 hg,
22 match as matchmod,
22 match as matchmod,
23 pathutil,
23 pathutil,
24 registrar,
24 registrar,
25 scmutil,
25 scmutil,
26 smartset,
26 smartset,
27 util,
27 util,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 lfcommands,
31 lfcommands,
32 lfutil,
32 lfutil,
33 storefactory,
33 storefactory,
34 )
34 )
35
35
36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
36 # -- Utility functions: commonly/repeatedly needed functionality ---------------
37
37
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    # a file is "large" iff its standin is tracked in the manifest
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m
49
49
def composenormalfilematcher(match, manifest, exclude=None):
    '''Create a matcher that matches only the non-largefiles in the
    original matcher, additionally skipping any names in ``exclude``.'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
            manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m
64
64
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)
74
74
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    # remember the replaced function so restorematchfn() can undo this
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch
82
82
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch')
90
90
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with ``f``, remembering the
    replaced function so restorematchandpatsfn() can undo this.'''
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats
96
96
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)
106
106
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add matched files as largefiles when they qualify (explicit
    --large, size above the configured minimum, or a largefiles
    pattern match).

    Returns a pair (added, bad): names added as largefiles and names
    that could not be added.
    '''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = matchmod.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

    added = [f for f in lfnames if f not in bad]
    return added, bad
178
178
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove matched largefiles, honoring --after and --dry-run.

    Returns nonzero when any file could not be removed (still exists,
    modified, or marked for add).
    '''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result
246
246
247 # For overriding mercurial.hgweb.webcommands so that largefiles will
247 # For overriding mercurial.hgweb.webcommands so that largefiles will
248 # appear at their right place in the manifests.
248 # appear at their right place in the manifests.
def decodepath(orig, path):
    '''Map a standin path back to its largefile name (or leave the path
    unchanged) for hgweb manifest display.'''
    return lfutil.splitstandin(path) or path
251
251
252 # -- Wrappers: modify existing commands --------------------------------
252 # -- Wrappers: modify existing commands --------------------------------
253
253
def overrideadd(orig, ui, repo, *pats, **opts):
    '''Wrap ``add``: reject the contradictory --normal + --large combo,
    otherwise delegate to the original command.'''
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
258
258
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''Wrap cmdutil.add: add qualifying files as largefiles, the rest as
    normal files. Returns the combined list of names that failed.'''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad
271
271
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    '''Wrap cmdutil.remove: remove normal files via the original command,
    then largefiles via removelargefiles(); nonzero if either failed.'''
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result
277
277
def overridestatusfn(orig, repo, rev2, **opts):
    '''Run the wrapped subrepo status with largefile-aware status enabled,
    always restoring the flag afterwards.'''
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
284
284
285 def overridestatus(orig, ui, repo, *pats, **opts):
285 def overridestatus(orig, ui, repo, *pats, **opts):
286 try:
286 try:
287 repo.lfstatus = True
287 repo.lfstatus = True
288 return orig(ui, repo, *pats, **opts)
288 return orig(ui, repo, *pats, **opts)
289 finally:
289 finally:
290 repo.lfstatus = False
290 repo.lfstatus = False
291
291
292 def overridedirty(orig, repo, ignoreupdate=False):
292 def overridedirty(orig, repo, ignoreupdate=False):
293 try:
293 try:
294 repo._repo.lfstatus = True
294 repo._repo.lfstatus = True
295 return orig(repo, ignoreupdate)
295 return orig(repo, ignoreupdate)
296 finally:
296 finally:
297 repo._repo.lfstatus = False
297 repo._repo.lfstatus = False
298
298
299 def overridelog(orig, ui, repo, *pats, **opts):
299 def overridelog(orig, ui, repo, *pats, **opts):
300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
300 def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
301 default='relpath', badfn=None):
301 default='relpath', badfn=None):
302 """Matcher that merges root directory with .hglf, suitable for log.
302 """Matcher that merges root directory with .hglf, suitable for log.
303 It is still possible to match .hglf directly.
303 It is still possible to match .hglf directly.
304 For any listed files run log on the standin too.
304 For any listed files run log on the standin too.
305 matchfn tries both the given filename and with .hglf stripped.
305 matchfn tries both the given filename and with .hglf stripped.
306 """
306 """
307 if opts is None:
307 if opts is None:
308 opts = {}
308 opts = {}
309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
309 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
310 badfn=badfn)
310 badfn=badfn)
311 m, p = copy.copy(matchandpats)
311 m, p = copy.copy(matchandpats)
312
312
313 if m.always():
313 if m.always():
314 # We want to match everything anyway, so there's no benefit trying
314 # We want to match everything anyway, so there's no benefit trying
315 # to add standins.
315 # to add standins.
316 return matchandpats
316 return matchandpats
317
317
318 pats = set(p)
318 pats = set(p)
319
319
320 def fixpats(pat, tostandin=lfutil.standin):
320 def fixpats(pat, tostandin=lfutil.standin):
321 if pat.startswith('set:'):
321 if pat.startswith('set:'):
322 return pat
322 return pat
323
323
324 kindpat = matchmod._patsplit(pat, None)
324 kindpat = matchmod._patsplit(pat, None)
325
325
326 if kindpat[0] is not None:
326 if kindpat[0] is not None:
327 return kindpat[0] + ':' + tostandin(kindpat[1])
327 return kindpat[0] + ':' + tostandin(kindpat[1])
328 return tostandin(kindpat[1])
328 return tostandin(kindpat[1])
329
329
330 if m._cwd:
330 if m._cwd:
331 hglf = lfutil.shortname
331 hglf = lfutil.shortname
332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
332 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
333
333
334 def tostandin(f):
334 def tostandin(f):
335 # The file may already be a standin, so truncate the back
335 # The file may already be a standin, so truncate the back
336 # prefix and test before mangling it. This avoids turning
336 # prefix and test before mangling it. This avoids turning
337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
337 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
338 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
339 return f
339 return f
340
340
341 # An absolute path is from outside the repo, so truncate the
341 # An absolute path is from outside the repo, so truncate the
342 # path to the root before building the standin. Otherwise cwd
342 # path to the root before building the standin. Otherwise cwd
343 # is somewhere in the repo, relative to root, and needs to be
343 # is somewhere in the repo, relative to root, and needs to be
344 # prepended before building the standin.
344 # prepended before building the standin.
345 if os.path.isabs(m._cwd):
345 if os.path.isabs(m._cwd):
346 f = f[len(back):]
346 f = f[len(back):]
347 else:
347 else:
348 f = m._cwd + '/' + f
348 f = m._cwd + '/' + f
349 return back + lfutil.standin(f)
349 return back + lfutil.standin(f)
350
350
351 pats.update(fixpats(f, tostandin) for f in p)
351 pats.update(fixpats(f, tostandin) for f in p)
352 else:
352 else:
353 def tostandin(f):
353 def tostandin(f):
354 if lfutil.isstandin(f):
354 if lfutil.isstandin(f):
355 return f
355 return f
356 return lfutil.standin(f)
356 return lfutil.standin(f)
357 pats.update(fixpats(f, tostandin) for f in p)
357 pats.update(fixpats(f, tostandin) for f in p)
358
358
359 for i in range(0, len(m._files)):
359 for i in range(0, len(m._files)):
360 # Don't add '.hglf' to m.files, since that is already covered by '.'
360 # Don't add '.hglf' to m.files, since that is already covered by '.'
361 if m._files[i] == '.':
361 if m._files[i] == '.':
362 continue
362 continue
363 standin = lfutil.standin(m._files[i])
363 standin = lfutil.standin(m._files[i])
364 # If the "standin" is a directory, append instead of replace to
364 # If the "standin" is a directory, append instead of replace to
365 # support naming a directory on the command line with only
365 # support naming a directory on the command line with only
366 # largefiles. The original directory is kept to support normal
366 # largefiles. The original directory is kept to support normal
367 # files.
367 # files.
368 if standin in ctx:
368 if standin in ctx:
369 m._files[i] = standin
369 m._files[i] = standin
370 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
370 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
371 m._files.append(standin)
371 m._files.append(standin)
372
372
373 m._fileroots = set(m._files)
373 m._fileroots = set(m._files)
374 m._always = False
374 m._always = False
375 origmatchfn = m.matchfn
375 origmatchfn = m.matchfn
376 def lfmatchfn(f):
376 def lfmatchfn(f):
377 lf = lfutil.splitstandin(f)
377 lf = lfutil.splitstandin(f)
378 if lf is not None and origmatchfn(lf):
378 if lf is not None and origmatchfn(lf):
379 return True
379 return True
380 r = origmatchfn(f)
380 r = origmatchfn(f)
381 return r
381 return r
382 m.matchfn = lfmatchfn
382 m.matchfn = lfmatchfn
383
383
384 ui.debug('updated patterns: %s\n' % sorted(pats))
384 ui.debug('updated patterns: %s\n' % sorted(pats))
385 return m, pats
385 return m, pats
386
386
387 # For hg log --patch, the match object is used in two different senses:
387 # For hg log --patch, the match object is used in two different senses:
388 # (1) to determine what revisions should be printed out, and
388 # (1) to determine what revisions should be printed out, and
389 # (2) to determine what files to print out diffs for.
389 # (2) to determine what files to print out diffs for.
390 # The magic matchandpats override should be used for case (1) but not for
390 # The magic matchandpats override should be used for case (1) but not for
391 # case (2).
391 # case (2).
392 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
392 def overridemakelogfilematcher(repo, pats, opts, badfn=None):
393 wctx = repo[None]
393 wctx = repo[None]
394 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
394 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
395 return lambda rev: match
395 return lambda rev: match
396
396
397 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
397 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
398 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
398 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
399 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
399 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
400
400
401 try:
401 try:
402 return orig(ui, repo, *pats, **opts)
402 return orig(ui, repo, *pats, **opts)
403 finally:
403 finally:
404 restorematchandpatsfn()
404 restorematchandpatsfn()
405 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
405 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
406
406
407 def overrideverify(orig, ui, repo, *pats, **opts):
407 def overrideverify(orig, ui, repo, *pats, **opts):
408 large = opts.pop('large', False)
408 large = opts.pop('large', False)
409 all = opts.pop('lfa', False)
409 all = opts.pop('lfa', False)
410 contents = opts.pop('lfc', False)
410 contents = opts.pop('lfc', False)
411
411
412 result = orig(ui, repo, *pats, **opts)
412 result = orig(ui, repo, *pats, **opts)
413 if large or all or contents:
413 if large or all or contents:
414 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
414 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
415 return result
415 return result
416
416
417 def overridedebugstate(orig, ui, repo, *pats, **opts):
417 def overridedebugstate(orig, ui, repo, *pats, **opts):
418 large = opts.pop('large', False)
418 large = opts.pop('large', False)
419 if large:
419 if large:
420 class fakerepo(object):
420 class fakerepo(object):
421 dirstate = lfutil.openlfdirstate(ui, repo)
421 dirstate = lfutil.openlfdirstate(ui, repo)
422 orig(ui, fakerepo, *pats, **opts)
422 orig(ui, fakerepo, *pats, **opts)
423 else:
423 else:
424 orig(ui, repo, *pats, **opts)
424 orig(ui, repo, *pats, **opts)
425
425
426 # Before starting the manifest merge, merge.updates will call
426 # Before starting the manifest merge, merge.updates will call
427 # _checkunknownfile to check if there are any files in the merged-in
427 # _checkunknownfile to check if there are any files in the merged-in
428 # changeset that collide with unknown files in the working copy.
428 # changeset that collide with unknown files in the working copy.
429 #
429 #
430 # The largefiles are seen as unknown, so this prevents us from merging
430 # The largefiles are seen as unknown, so this prevents us from merging
431 # in a file 'foo' if we already have a largefile with the same name.
431 # in a file 'foo' if we already have a largefile with the same name.
432 #
432 #
433 # The overridden function filters the unknown files by removing any
433 # The overridden function filters the unknown files by removing any
434 # largefiles. This makes the merge proceed and we can then handle this
434 # largefiles. This makes the merge proceed and we can then handle this
435 # case further in the overridden calculateupdates function below.
435 # case further in the overridden calculateupdates function below.
436 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
436 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
437 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
437 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
438 return False
438 return False
439 return origfn(repo, wctx, mctx, f, f2)
439 return origfn(repo, wctx, mctx, f, f2)
440
440
441 # The manifest merge handles conflicts on the manifest level. We want
441 # The manifest merge handles conflicts on the manifest level. We want
442 # to handle changes in largefile-ness of files at this level too.
442 # to handle changes in largefile-ness of files at this level too.
443 #
443 #
444 # The strategy is to run the original calculateupdates and then process
444 # The strategy is to run the original calculateupdates and then process
445 # the action list it outputs. There are two cases we need to deal with:
445 # the action list it outputs. There are two cases we need to deal with:
446 #
446 #
447 # 1. Normal file in p1, largefile in p2. Here the largefile is
447 # 1. Normal file in p1, largefile in p2. Here the largefile is
448 # detected via its standin file, which will enter the working copy
448 # detected via its standin file, which will enter the working copy
449 # with a "get" action. It is not "merge" since the standin is all
449 # with a "get" action. It is not "merge" since the standin is all
450 # Mercurial is concerned with at this level -- the link to the
450 # Mercurial is concerned with at this level -- the link to the
451 # existing normal file is not relevant here.
451 # existing normal file is not relevant here.
452 #
452 #
453 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
453 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
454 # since the largefile will be present in the working copy and
454 # since the largefile will be present in the working copy and
455 # different from the normal file in p2. Mercurial therefore
455 # different from the normal file in p2. Mercurial therefore
456 # triggers a merge action.
456 # triggers a merge action.
457 #
457 #
458 # In both cases, we prompt the user and emit new actions to either
458 # In both cases, we prompt the user and emit new actions to either
459 # remove the standin (if the normal file was kept) or to remove the
459 # remove the standin (if the normal file was kept) or to remove the
460 # normal file and get the standin (if the largefile was kept). The
460 # normal file and get the standin (if the largefile was kept). The
461 # default prompt answer is to use the largefile version since it was
461 # default prompt answer is to use the largefile version since it was
462 # presumably changed on purpose.
462 # presumably changed on purpose.
463 #
463 #
464 # Finally, the merge.applyupdates function will then take care of
464 # Finally, the merge.applyupdates function will then take care of
465 # writing the files into the working copy and lfcommands.updatelfiles
465 # writing the files into the working copy and lfcommands.updatelfiles
466 # will update the largefiles.
466 # will update the largefiles.
467 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
467 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
468 acceptremote, *args, **kwargs):
468 acceptremote, *args, **kwargs):
469 overwrite = force and not branchmerge
469 overwrite = force and not branchmerge
470 actions, diverge, renamedelete = origfn(
470 actions, diverge, renamedelete = origfn(
471 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
471 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)
472
472
473 if overwrite:
473 if overwrite:
474 return actions, diverge, renamedelete
474 return actions, diverge, renamedelete
475
475
476 # Convert to dictionary with filename as key and action as value.
476 # Convert to dictionary with filename as key and action as value.
477 lfiles = set()
477 lfiles = set()
478 for f in actions:
478 for f in actions:
479 splitstandin = lfutil.splitstandin(f)
479 splitstandin = lfutil.splitstandin(f)
480 if splitstandin in p1:
480 if splitstandin in p1:
481 lfiles.add(splitstandin)
481 lfiles.add(splitstandin)
482 elif lfutil.standin(f) in p1:
482 elif lfutil.standin(f) in p1:
483 lfiles.add(f)
483 lfiles.add(f)
484
484
485 for lfile in sorted(lfiles):
485 for lfile in sorted(lfiles):
486 standin = lfutil.standin(lfile)
486 standin = lfutil.standin(lfile)
487 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
487 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
488 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
488 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
489 if sm in ('g', 'dc') and lm != 'r':
489 if sm in ('g', 'dc') and lm != 'r':
490 if sm == 'dc':
490 if sm == 'dc':
491 f1, f2, fa, move, anc = sargs
491 f1, f2, fa, move, anc = sargs
492 sargs = (p2[f2].flags(), False)
492 sargs = (p2[f2].flags(), False)
493 # Case 1: normal file in the working copy, largefile in
493 # Case 1: normal file in the working copy, largefile in
494 # the second parent
494 # the second parent
495 usermsg = _('remote turned local normal file %s into a largefile\n'
495 usermsg = _('remote turned local normal file %s into a largefile\n'
496 'use (l)argefile or keep (n)ormal file?'
496 'use (l)argefile or keep (n)ormal file?'
497 '$$ &Largefile $$ &Normal file') % lfile
497 '$$ &Largefile $$ &Normal file') % lfile
498 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
498 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
499 actions[lfile] = ('r', None, 'replaced by standin')
499 actions[lfile] = ('r', None, 'replaced by standin')
500 actions[standin] = ('g', sargs, 'replaces standin')
500 actions[standin] = ('g', sargs, 'replaces standin')
501 else: # keep local normal file
501 else: # keep local normal file
502 actions[lfile] = ('k', None, 'replaces standin')
502 actions[lfile] = ('k', None, 'replaces standin')
503 if branchmerge:
503 if branchmerge:
504 actions[standin] = ('k', None, 'replaced by non-standin')
504 actions[standin] = ('k', None, 'replaced by non-standin')
505 else:
505 else:
506 actions[standin] = ('r', None, 'replaced by non-standin')
506 actions[standin] = ('r', None, 'replaced by non-standin')
507 elif lm in ('g', 'dc') and sm != 'r':
507 elif lm in ('g', 'dc') and sm != 'r':
508 if lm == 'dc':
508 if lm == 'dc':
509 f1, f2, fa, move, anc = largs
509 f1, f2, fa, move, anc = largs
510 largs = (p2[f2].flags(), False)
510 largs = (p2[f2].flags(), False)
511 # Case 2: largefile in the working copy, normal file in
511 # Case 2: largefile in the working copy, normal file in
512 # the second parent
512 # the second parent
513 usermsg = _('remote turned local largefile %s into a normal file\n'
513 usermsg = _('remote turned local largefile %s into a normal file\n'
514 'keep (l)argefile or use (n)ormal file?'
514 'keep (l)argefile or use (n)ormal file?'
515 '$$ &Largefile $$ &Normal file') % lfile
515 '$$ &Largefile $$ &Normal file') % lfile
516 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
516 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
517 if branchmerge:
517 if branchmerge:
518 # largefile can be restored from standin safely
518 # largefile can be restored from standin safely
519 actions[lfile] = ('k', None, 'replaced by standin')
519 actions[lfile] = ('k', None, 'replaced by standin')
520 actions[standin] = ('k', None, 'replaces standin')
520 actions[standin] = ('k', None, 'replaces standin')
521 else:
521 else:
522 # "lfile" should be marked as "removed" without
522 # "lfile" should be marked as "removed" without
523 # removal of itself
523 # removal of itself
524 actions[lfile] = ('lfmr', None,
524 actions[lfile] = ('lfmr', None,
525 'forget non-standin largefile')
525 'forget non-standin largefile')
526
526
527 # linear-merge should treat this largefile as 're-added'
527 # linear-merge should treat this largefile as 're-added'
528 actions[standin] = ('a', None, 'keep standin')
528 actions[standin] = ('a', None, 'keep standin')
529 else: # pick remote normal file
529 else: # pick remote normal file
530 actions[lfile] = ('g', largs, 'replaces standin')
530 actions[lfile] = ('g', largs, 'replaces standin')
531 actions[standin] = ('r', None, 'replaced by non-standin')
531 actions[standin] = ('r', None, 'replaced by non-standin')
532
532
533 return actions, diverge, renamedelete
533 return actions, diverge, renamedelete
534
534
535 def mergerecordupdates(orig, repo, actions, branchmerge):
535 def mergerecordupdates(orig, repo, actions, branchmerge):
536 if 'lfmr' in actions:
536 if 'lfmr' in actions:
537 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
537 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
538 for lfile, args, msg in actions['lfmr']:
538 for lfile, args, msg in actions['lfmr']:
539 # this should be executed before 'orig', to execute 'remove'
539 # this should be executed before 'orig', to execute 'remove'
540 # before all other actions
540 # before all other actions
541 repo.dirstate.remove(lfile)
541 repo.dirstate.remove(lfile)
542 # make sure lfile doesn't get synclfdirstate'd as normal
542 # make sure lfile doesn't get synclfdirstate'd as normal
543 lfdirstate.add(lfile)
543 lfdirstate.add(lfile)
544 lfdirstate.write()
544 lfdirstate.write()
545
545
546 return orig(repo, actions, branchmerge)
546 return orig(repo, actions, branchmerge)
547
547
548 # Override filemerge to prompt the user about how they wish to merge
548 # Override filemerge to prompt the user about how they wish to merge
549 # largefiles. This will handle identical edits without prompting the user.
549 # largefiles. This will handle identical edits without prompting the user.
550 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
550 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
551 labels=None):
551 labels=None):
552 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
552 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
553 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
553 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
554 labels=labels)
554 labels=labels)
555
555
556 ahash = fca.data().strip().lower()
556 ahash = fca.data().strip().lower()
557 dhash = fcd.data().strip().lower()
557 dhash = fcd.data().strip().lower()
558 ohash = fco.data().strip().lower()
558 ohash = fco.data().strip().lower()
559 if (ohash != ahash and
559 if (ohash != ahash and
560 ohash != dhash and
560 ohash != dhash and
561 (dhash == ahash or
561 (dhash == ahash or
562 repo.ui.promptchoice(
562 repo.ui.promptchoice(
563 _('largefile %s has a merge conflict\nancestor was %s\n'
563 _('largefile %s has a merge conflict\nancestor was %s\n'
564 'keep (l)ocal %s or\ntake (o)ther %s?'
564 'keep (l)ocal %s or\ntake (o)ther %s?'
565 '$$ &Local $$ &Other') %
565 '$$ &Local $$ &Other') %
566 (lfutil.splitstandin(orig), ahash, dhash, ohash),
566 (lfutil.splitstandin(orig), ahash, dhash, ohash),
567 0) == 1)):
567 0) == 1)):
568 repo.wwrite(fcd.path(), fco.data(), fco.flags())
568 repo.wwrite(fcd.path(), fco.data(), fco.flags())
569 return True, 0, False
569 return True, 0, False
570
570
571 def copiespathcopies(orig, ctx1, ctx2, match=None):
571 def copiespathcopies(orig, ctx1, ctx2, match=None):
572 copies = orig(ctx1, ctx2, match=match)
572 copies = orig(ctx1, ctx2, match=match)
573 updated = {}
573 updated = {}
574
574
575 for k, v in copies.iteritems():
575 for k, v in copies.iteritems():
576 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
576 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
577
577
578 return updated
578 return updated
579
579
580 # Copy first changes the matchers to match standins instead of
580 # Copy first changes the matchers to match standins instead of
581 # largefiles. Then it overrides util.copyfile in that function it
581 # largefiles. Then it overrides util.copyfile in that function it
582 # checks if the destination largefile already exists. It also keeps a
582 # checks if the destination largefile already exists. It also keeps a
583 # list of copied files so that the largefiles can be copied and the
583 # list of copied files so that the largefiles can be copied and the
584 # dirstate updated.
584 # dirstate updated.
585 def overridecopy(orig, ui, repo, pats, opts, rename=False):
585 def overridecopy(orig, ui, repo, pats, opts, rename=False):
586 # doesn't remove largefile on rename
586 # doesn't remove largefile on rename
587 if len(pats) < 2:
587 if len(pats) < 2:
588 # this isn't legal, let the original function deal with it
588 # this isn't legal, let the original function deal with it
589 return orig(ui, repo, pats, opts, rename)
589 return orig(ui, repo, pats, opts, rename)
590
590
591 # This could copy both lfiles and normal files in one command,
591 # This could copy both lfiles and normal files in one command,
592 # but we don't want to do that. First replace their matcher to
592 # but we don't want to do that. First replace their matcher to
593 # only match normal files and run it, then replace it to just
593 # only match normal files and run it, then replace it to just
594 # match largefiles and run it again.
594 # match largefiles and run it again.
595 nonormalfiles = False
595 nonormalfiles = False
596 nolfiles = False
596 nolfiles = False
597 installnormalfilesmatchfn(repo[None].manifest())
597 installnormalfilesmatchfn(repo[None].manifest())
598 try:
598 try:
599 result = orig(ui, repo, pats, opts, rename)
599 result = orig(ui, repo, pats, opts, rename)
600 except error.Abort as e:
600 except error.Abort as e:
601 if str(e) != _('no files to copy'):
601 if str(e) != _('no files to copy'):
602 raise e
602 raise e
603 else:
603 else:
604 nonormalfiles = True
604 nonormalfiles = True
605 result = 0
605 result = 0
606 finally:
606 finally:
607 restorematchfn()
607 restorematchfn()
608
608
609 # The first rename can cause our current working directory to be removed.
609 # The first rename can cause our current working directory to be removed.
610 # In that case there is nothing left to copy/rename so just quit.
610 # In that case there is nothing left to copy/rename so just quit.
611 try:
611 try:
612 repo.getcwd()
612 repo.getcwd()
613 except OSError:
613 except OSError:
614 return result
614 return result
615
615
616 def makestandin(relpath):
616 def makestandin(relpath):
617 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
617 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
618 return repo.wvfs.join(lfutil.standin(path))
618 return repo.wvfs.join(lfutil.standin(path))
619
619
620 fullpats = scmutil.expandpats(pats)
620 fullpats = scmutil.expandpats(pats)
621 dest = fullpats[-1]
621 dest = fullpats[-1]
622
622
623 if os.path.isdir(dest):
623 if os.path.isdir(dest):
624 if not os.path.isdir(makestandin(dest)):
624 if not os.path.isdir(makestandin(dest)):
625 os.makedirs(makestandin(dest))
625 os.makedirs(makestandin(dest))
626
626
627 try:
627 try:
628 # When we call orig below it creates the standins but we don't add
628 # When we call orig below it creates the standins but we don't add
629 # them to the dir state until later so lock during that time.
629 # them to the dir state until later so lock during that time.
630 wlock = repo.wlock()
630 wlock = repo.wlock()
631
631
632 manifest = repo[None].manifest()
632 manifest = repo[None].manifest()
633 def overridematch(ctx, pats=(), opts=None, globbed=False,
633 def overridematch(ctx, pats=(), opts=None, globbed=False,
634 default='relpath', badfn=None):
634 default='relpath', badfn=None):
635 if opts is None:
635 if opts is None:
636 opts = {}
636 opts = {}
637 newpats = []
637 newpats = []
638 # The patterns were previously mangled to add the standin
638 # The patterns were previously mangled to add the standin
639 # directory; we need to remove that now
639 # directory; we need to remove that now
640 for pat in pats:
640 for pat in pats:
641 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
641 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
642 newpats.append(pat.replace(lfutil.shortname, ''))
642 newpats.append(pat.replace(lfutil.shortname, ''))
643 else:
643 else:
644 newpats.append(pat)
644 newpats.append(pat)
645 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
645 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
646 m = copy.copy(match)
646 m = copy.copy(match)
647 lfile = lambda f: lfutil.standin(f) in manifest
647 lfile = lambda f: lfutil.standin(f) in manifest
648 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
648 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
649 m._fileroots = set(m._files)
649 m._fileroots = set(m._files)
650 origmatchfn = m.matchfn
650 origmatchfn = m.matchfn
651 def matchfn(f):
651 def matchfn(f):
652 lfile = lfutil.splitstandin(f)
652 lfile = lfutil.splitstandin(f)
653 return (lfile is not None and
653 return (lfile is not None and
654 (f in manifest) and
654 (f in manifest) and
655 origmatchfn(lfile) or
655 origmatchfn(lfile) or
656 None)
656 None)
657 m.matchfn = matchfn
657 m.matchfn = matchfn
658 return m
658 return m
659 oldmatch = installmatchfn(overridematch)
659 oldmatch = installmatchfn(overridematch)
660 listpats = []
660 listpats = []
661 for pat in pats:
661 for pat in pats:
662 if matchmod.patkind(pat) is not None:
662 if matchmod.patkind(pat) is not None:
663 listpats.append(pat)
663 listpats.append(pat)
664 else:
664 else:
665 listpats.append(makestandin(pat))
665 listpats.append(makestandin(pat))
666
666
667 try:
667 try:
668 origcopyfile = util.copyfile
668 origcopyfile = util.copyfile
669 copiedfiles = []
669 copiedfiles = []
670 def overridecopyfile(src, dest):
670 def overridecopyfile(src, dest):
671 if (lfutil.shortname in src and
671 if (lfutil.shortname in src and
672 dest.startswith(repo.wjoin(lfutil.shortname))):
672 dest.startswith(repo.wjoin(lfutil.shortname))):
673 destlfile = dest.replace(lfutil.shortname, '')
673 destlfile = dest.replace(lfutil.shortname, '')
674 if not opts['force'] and os.path.exists(destlfile):
674 if not opts['force'] and os.path.exists(destlfile):
675 raise IOError('',
675 raise IOError('',
676 _('destination largefile already exists'))
676 _('destination largefile already exists'))
677 copiedfiles.append((src, dest))
677 copiedfiles.append((src, dest))
678 origcopyfile(src, dest)
678 origcopyfile(src, dest)
679
679
680 util.copyfile = overridecopyfile
680 util.copyfile = overridecopyfile
681 result += orig(ui, repo, listpats, opts, rename)
681 result += orig(ui, repo, listpats, opts, rename)
682 finally:
682 finally:
683 util.copyfile = origcopyfile
683 util.copyfile = origcopyfile
684
684
685 lfdirstate = lfutil.openlfdirstate(ui, repo)
685 lfdirstate = lfutil.openlfdirstate(ui, repo)
686 for (src, dest) in copiedfiles:
686 for (src, dest) in copiedfiles:
687 if (lfutil.shortname in src and
687 if (lfutil.shortname in src and
688 dest.startswith(repo.wjoin(lfutil.shortname))):
688 dest.startswith(repo.wjoin(lfutil.shortname))):
689 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
689 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
690 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
690 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
691 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
691 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
692 if not os.path.isdir(destlfiledir):
692 if not os.path.isdir(destlfiledir):
693 os.makedirs(destlfiledir)
693 os.makedirs(destlfiledir)
694 if rename:
694 if rename:
695 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
695 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
696
696
697 # The file is gone, but this deletes any empty parent
697 # The file is gone, but this deletes any empty parent
698 # directories as a side-effect.
698 # directories as a side-effect.
699 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
699 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
700 lfdirstate.remove(srclfile)
700 lfdirstate.remove(srclfile)
701 else:
701 else:
702 util.copyfile(repo.wjoin(srclfile),
702 util.copyfile(repo.wjoin(srclfile),
703 repo.wjoin(destlfile))
703 repo.wjoin(destlfile))
704
704
705 lfdirstate.add(destlfile)
705 lfdirstate.add(destlfile)
706 lfdirstate.write()
706 lfdirstate.write()
707 except error.Abort as e:
707 except error.Abort as e:
708 if str(e) != _('no files to copy'):
708 if str(e) != _('no files to copy'):
709 raise e
709 raise e
710 else:
710 else:
711 nolfiles = True
711 nolfiles = True
712 finally:
712 finally:
713 restorematchfn()
713 restorematchfn()
714 wlock.release()
714 wlock.release()
715
715
716 if nolfiles and nonormalfiles:
716 if nolfiles and nonormalfiles:
717 raise error.Abort(_('no files to copy'))
717 raise error.Abort(_('no files to copy'))
718
718
719 return result
719 return result
720
720
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Wrap cmdutil.revert so that reverting a largefile reverts its
    standin, then syncs the largefile content back from the standin."""
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # refresh standins of modified largefiles so revert compares
        # against the current largefile contents
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        # drop standins of largefiles deleted from the working directory
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if (repo.wvfs.exists(fstandin)):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            # Build a matcher that targets standins instead of the
            # largefiles the user named on the command line.
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    # known largefile in either context: match its standin
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate[f] == 'r':
                    # standin exists only in the working copy, or the
                    # largefile is marked removed: nothing to revert here
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    # match a standin only if the user's pattern matched
                    # the largefile and the standin exists in a context
                    return (origmatchfn(lfile) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        # sync largefile contents from whichever standins revert changed
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)
797
797
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """Run pull, then cache largefiles for the revisions selected by
    ``--lfrev``; ``--all-largefiles`` adds the ``pulled()`` revset so
    every newly pulled changeset is covered."""
    revsprepull = len(repo)
    source = source or 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        # expose the pre-pull boundary to the pulled() revset predicate
        repo.firstpulled = revsprepull
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
822
822
def overridepush(orig, ui, repo, *args, **kwargs):
    """Wrap the push command: lift ``--lfrev`` out of the command options
    and pass the resolved revisions through ``opargs['lfrevs']``."""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
830
830
def exchangepushoperation(orig, *args, **kwargs):
    """Wrap the pushoperation constructor so the ``lfrevs`` selection
    (from --lfrev) rides along on the resulting object."""
    # strip our private keyword before delegating to the real constructor
    pushop = orig(*args, **{k: v for k, v in kwargs.items() if k != 'lfrevs'})
    pushop.lfrevs = kwargs.get('lfrevs', None)
    return pushop
837
837
# Registration point for the revset predicates defined by this extension.
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set by overridepull() while the --lfrev
    # expressions are being evaluated; outside that window this predicate
    # is meaningless and we abort.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
865
865
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject ``clone --all-largefiles`` when the destination is not a
    local repository, since largefiles can only be downloaded locally."""
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
876
876
def hgclone(orig, ui, opts, *args, **kwargs):
    """Post-process hg.clone(): enable largefiles in the new clone's hgrc
    and, with --all-largefiles, pre-download every largefile.

    Returns the wrapped clone's (sourcerepo, destrepo) pair, or None when
    --all-largefiles was requested and some largefiles were missing.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlargefiles=\n')

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            # a nonzero 'missing' count signals failure to the caller
            if missing != 0:
                return None

    return result
905
905
def overriderebase(orig, ui, repo, **opts):
    """Run rebase with largefiles status output muted and the automated
    commit hook installed, so rebased commits refresh standins."""
    if not util.safehasattr(repo, '_largefilesenabled'):
        # largefiles is not active for this repo: plain rebase
        return orig(ui, repo, **opts)

    isresume = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(isresume))
    # swallow every largefiles status message for the duration
    repo._lfstatuswriters.append(lambda *msg, **kwargs: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
918
918
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefile status enabled so the
    archive gets largefile contents instead of standins."""
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True

    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
926
926
def hgwebarchive(orig, web, req, tmpl):
    """hgweb's archive handler, run with largefile reporting switched on
    so served archives contain largefile contents, not standins."""
    repo = web.repo
    repo.lfstatus = True

    try:
        return orig(web, req, tmpl)
    finally:
        repo.lfstatus = False
934
934
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    """Create an archive in which standins are replaced by the actual
    largefile contents.

    Delegates to the wrapped archive() unless lfstatus is set on the repo
    (or on its unfiltered twin).  Largefile contents are resolved through
    the local store/system cache; aborts if any is missing.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # emit one archive member, honoring the caller's matcher and the
        # repo's encode/decode filters
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # f is a standin: archive the largefile under its real name
            if node is not None:
                # committed revision: the standin content is the hash,
                # so look the largefile up in the store/cache
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfile)
            else:
                # working-directory archive: read the largefile directly
                path = lfile

            f = lfile

            # safe despite late binding: write() is called on the next
            # line, before 'path' is rebound by a later iteration
            getdata = lambda: util.readfile(path)
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, matchfn)
            # make the subrepo archive largefile-aware as well
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
1002
1002
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo, substituting largefile contents for
    standins (the subrepo counterpart of overridearchive)."""
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    # bring the subrepo working copy to the revision recorded in .hgsubstate
    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # NOTE(review): this tests the loop variable 'f' rather than the
        # 'name' parameter; the two are identical for every call below,
        # but 'name' would be clearer -- confirm before changing.
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # f is a standin: emit the largefile under its real name
            if ctx.node() is not None:
                # committed revision: resolve the hash via store/cache
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfile)
            else:
                path = lfile

            f = lfile

            # safe despite late binding: write() is invoked immediately,
            # before 'path' is rebound by the next iteration
            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        # recurse with largefile awareness enabled in the nested subrepo
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch, decode)
1051
1051
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort when the working directory has uncommitted changes,
    including largefile modifications not yet reflected in standins.

    Raises error.Abort (from orig, or directly) on any uncommitted change.
    """
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    try:
        # with lfstatus set, status() reports largefiles, not standins
        s = repo.status()
    finally:
        # restore the flag even if status() raises, matching the
        # try/finally discipline of the other lfstatus wrappers
        repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_('uncommitted changes'))
1063
1063
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute the post-commit status with repo.lfstatus enabled, so
    largefiles (not their standins) appear in the result."""
    repo.lfstatus = True
    try:
        result = orig(repo, *args, **kwargs)
    finally:
        repo.lfstatus = False
    return result
1070
1070
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """Wrap cmdutil.forget() to also forget matched largefiles.

    Normal files are handled by the wrapped implementation; matched
    largefiles are dropped from lfdirstate and their standins are
    unlinked and forgotten.  Returns the combined (bad, forgot) lists.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # only files whose standin is actually tracked can be forgotten
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        # warn when the standin is neither in dirstate nor present as a
        # directory in the working copy: nothing to untrack
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # added but never committed: just drop the entry
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1114
1114
def _getoutgoings(repo, other, missing, addfunc):
    """Invoke ``addfunc(filename, lfhash)`` once per unique largefile in
    the outgoing revisions ``missing``.

    Pairs whose hash already exists on the ``other`` repository are
    skipped.
    """
    seen = set()
    hashes = set()
    def dedup(fn, lfhash):
        key = (fn, lfhash)
        if key not in seen:
            seen.add(key)
            hashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if hashes:
        existing = storefactory.openstore(repo, other).exists(hashes)
        # report only pairs whose hash is absent from "other"
        for fn, lfhash in seen:
            if not existing[lfhash]:
                addfunc(fn, lfhash)
1137
1137
def outgoinghook(ui, repo, other, opts, missing):
    """'hg outgoing --large' hook: list the largefiles that would be
    uploaded to 'other' for the outgoing changesets in 'missing'."""
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # debug mode: remember every hash per file so it can be shown
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            # normal mode: only the file names are reported
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1169
1169
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --remote --large' hook.

    When 'changes' is None this is the pre-check phase: the returned
    (incoming, outgoing) booleans tell the summary command which remote
    checks are needed.  Otherwise report how many largefile entities and
    files would be uploaded to the push target.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        # changes[1] carries the outgoing-side information
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1198
1198
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run the summary command with largefile status reporting enabled."""
    try:
        # keep the assignment inside the try so the finally always
        # restores the flag, exactly as the original did
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1205
1205
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    '''Wrap scmutil.addremove() so largefiles are added and removed properly.'''
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Find largefiles missing from the working directory so they can be
    # scheduled for removal.
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()),
                                  [], False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        deletedmatch = copy.copy(matcher)

        # The m._files and m._map attributes are deliberately left alone:
        # they drive m.exact(), which governs whether (and how) a file name
        # is printed. Restricting matchfn to the deleted set is sufficient.
        origmatchfn = deletedmatch.matchfn
        deletedmatch.matchfn = lambda f: f in s.deleted and origmatchfn(f)

        removelargefiles(repo.ui, repo, True, deletedmatch, **opts)
    # Any files that *should* be added as largefiles are added here ...
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # ... and everything else is handed off to the original addremove with a
    # matcher that hides largefiles from it.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1240
1240
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    '''Wrap "hg purge" so tracked largefiles are not treated as unknown.

    repo.status() is temporarily replaced with a variant that drops
    largefiles from the unknown/ignored lists, so "purge --all" does not
    delete them. The original status method is always restored, even if
    the wrapped purge raises.
    '''
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow
    # the monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do
    # something cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # only files the largefiles dirstate knows nothing about are
        # genuinely unknown/ignored
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        # restore the monkey patch even when purge fails; the original code
        # left repo.status patched if orig() raised
        repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    '''Wrap "hg rollback" to restore standins and the largefiles dirstate.

    After the wrapped rollback moves the dirstate parents, standin files
    in the working directory are rewritten to match the new parent, and
    orphaned standins/lfdirstate entries are removed.
    '''
    with repo.wlock():
        before = repo.dirstate.parents()
        # standins that may become orphaned by the rollback
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        # 'lfile' instead of 'file': don't shadow the py2 builtin
        for lfile in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, lfile, True)
            orphans.discard(lfile)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result
1303
1303
def overridetransplant(orig, ui, repo, *revs, **opts):
    '''Wrap "hg transplant" so largefile standins are committed properly.'''
    resuming = opts.get('continue')
    # commit standins automatically and silence largefiles status output
    # for the duration of the transplant
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1314
1314
def overridecat(orig, ui, repo, file1, *pats, **opts):
    '''Wrap "hg cat" so largefile contents are fetched and written out.'''
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)

    # let a pattern naming a largefile also match its standin
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn

    # suppress "no such file" complaints for names matched via standins
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    # make directory traversal descend into the standin directory
    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # normal file, or a standin named explicitly by the user:
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                # largefile: ensure it is in the user cache, then stream it
                hash = lfutil.readstandin(repo, lf, ctx)
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1374
1374
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    '''Wrap merge.update() to keep largefiles and their standins in sync.'''
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch | | |
        # merge | force | partial | action
        # -------+-------+---------+--------------
        # x | x | x | linear-merge
        # o | x | x | branch-merge
        # x | o | x | overwrite (as clean update)
        # o | o | x | force-branch-merge (*1)
        # x | x | o | (*)
        # o | x | o | (*)
        # x | o | o | overwrite (as revert)
        # o | o | o | (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                      repo.getcwd()),
                                      [], False, True, False)
        oldclean = set(s.clean)
        pctx = repo['.']
        dctx = repo[node]
        # refresh standins of possibly-edited largefiles so the merge
        # machinery sees their current content
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, pctx)):
                # content is unchanged from the parent after all
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them,
        # mark all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
1447
1447
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    '''Wrap scmutil.marktouched() to refresh any touched largefiles.'''
    result = orig(repo, files, *args, **kwargs)

    # collect the largefile name behind every touched standin
    standins = (lfutil.splitstandin(f) for f in files)
    filelist = [lf for lf in standins if lf is not None]
    if filelist:
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=False, normallookup=True)

    return result
General Comments 0
You need to be logged in to leave comments. Login now