##// END OF EJS Templates
largefiles: link the core dirstate._changing context to the lfdirstate one...
marmoute -
r50922:0b4a6912 default
parent child Browse files
Show More
@@ -1,810 +1,818 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from mercurial.pycompat import open
18 from mercurial.pycompat import open
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 pycompat,
26 pycompat,
27 requirements,
27 requirements,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33 from mercurial.utils import hashutil
33 from mercurial.utils import hashutil
34 from mercurial.dirstateutils import timestamp
34 from mercurial.dirstateutils import timestamp
35
35
# Name of the directory (at the root of the working copy) that holds the
# standin files for largefiles.
shortname = b'.hglf'
shortnameslash = shortname + b'/'
# Name of the largefiles store directory under .hg/, also the config section.
longname = b'largefiles'
39
39
40 # -- Private worker functions ------------------------------------------
40 # -- Private worker functions ------------------------------------------
41
41
42
42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to *value* for the duration of the
    ``with`` block, restoring the previous value (default False) on exit."""
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        # always restore, even if the body raised
        repo.lfstatus = previous
51
51
52
52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size (in MB) as a float.

    *opt* (the command-line value) wins; otherwise, when *assumelfiles* is
    set, fall back to the ``largefiles.minsize`` config (or *default*).
    Aborts if no size can be determined or it is not numeric.
    """
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, b'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % size
            )
    if size is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return size
67
67
68
68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        # preserve the source's permission bits on the copy
        os.chmod(dest, os.stat(src).st_mode)
80
80
81
81
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    # cache entries are content-addressed: the filename is the hash itself
    return os.path.join(_usercachedir(ui), hash)
88
88
89
89
def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.

    Resolution order: explicit ``<name>.usercache`` config, then the
    platform-conventional cache directory derived from the environment.
    Aborts (with a platform-specific hint) if nothing can be determined.
    '''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        # prefer the non-roaming profile, fall back to the roaming one
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        # follow the XDG base-directory convention, with ~/.cache fallback
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138
138
139
139
def inusercache(ui, hash):
    """Return True if a largefile with *hash* is present in the user cache."""
    return os.path.exists(usercachepath(ui, hash))
143
143
144
144
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        # promote the user-cache copy into the repo store (hardlink or copy)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
159
159
160
160
class largefilesdirstate(dirstate.dirstate):
    """A dirstate subclass that tracks largefiles.

    All incoming paths are normalized through unixpath() before being
    handed to the base class, since the lfdirstate always stores
    slash-separated, normalized paths.
    """

    # Marker attribute letting core dirstate code recognize this object as
    # the largefiles sub-dirstate (see openlfdirstate's _sub_dirstate lookup).
    _large_file_dirstate = True

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)
201
203
202
204
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    # Reuse the lfdirstate attached to the core dirstate's active
    # 'changing_*' context, so both share one consistent view.
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            # non-blocking: if someone else holds the wlock, skip creation
            with repo.wlock(wait=False):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    # seed every known largefile as possibly dirty so the
                    # next status check re-hashes it
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate
250
258
251
259
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' entries of the lfdirstate status against '.'.

    Files whose content hash differs from the standin recorded in the parent
    are moved to 'modified'; matching ones are moved to 'clean' and their
    stat data is cached back into the lfdirstate.  Returns the status object.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # no standin in the parent: treat as modified below
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            # only cache an mtime that cannot race with ongoing writes
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s
277
285
278
286
def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]
292
300
293
301
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))
297
305
298
306
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    # shared repos keep the primary store in the share source
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
305
313
306
314
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)
323
331
324
332
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    # verify integrity of the copied data; remove the file on mismatch
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
348
356
349
357
def copytostore(repo, ctx, file, fstandin):
    """Copy largefile *file* (whose standin in *ctx* is *fstandin*) from the
    working copy into the repo store, if not already present."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
362
370
363
371
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in the manifest (not removals)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
372
380
373
381
def copytostoreabsolute(repo, file, hash):
    """Copy the file at absolute path *file* into the store under *hash*,
    linking from the user cache when possible, and mirror it back to the
    user cache afterwards."""
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            # atomictempfile: store entry appears fully written or not at all
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
386
394
387
395
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)
393
401
394
402
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase each of rmatcher's patterns under the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
412
420
413
421
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # f must be a standin AND its underlying file must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher
427
435
428
436
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
440
448
441
449
def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)
446
454
447
455
def splitstandin(filename):
    """Return the largefile path for a standin path, or None.

    Split on / because that's what dirstate always uses, even on Windows;
    convert the local separator first in case the name came from an
    external source (like the command line).
    """
    head, sep, tail = util.pconvert(filename).partition(b'/')
    if sep and head == shortname:
        return tail
    return None
457
465
458
466
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    # guard clause: a missing largefile is a hard error
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    abspath = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(abspath), getexecutable(abspath))
471
479
472
480
def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    data = fctx.data()
    return data.strip()
478
486
479
487
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # standin content is the hex hash plus a trailing newline; 'x' flag
    # mirrors the largefile's executable bit
    flags = b'x' if executable else b''
    repo.wwrite(standin, hash + b'\n', flags)
483
491
484
492
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())
493
501
494
502
def hashfile(file):
    """Return the hex SHA-1 of the file's content, or b'' if it is missing."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
500
508
501
509
def getexecutable(filename):
    """Return truthy iff *filename* has the user, group AND other
    executable bits all set (matches how largefiles records the x flag)."""
    mode = os.stat(filename).st_mode
    return mode & stat.S_IXUSR and mode & stat.S_IXGRP and mode & stat.S_IXOTH
509
517
510
518
def urljoin(first, second, *arg):
    """Join URL fragments, guaranteeing exactly one b'/' between parts."""

    def _join(left, right):
        sep = b'' if left.endswith(b'/') else b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + sep + right

    result = _join(first, second)
    for piece in arg:
        result = _join(result, piece)
    return result
523
531
524
532
525 def hexsha1(fileobj):
533 def hexsha1(fileobj):
526 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
534 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
527 object data"""
535 object data"""
528 h = hashutil.sha1()
536 h = hashutil.sha1()
529 for chunk in util.filechunkiter(fileobj):
537 for chunk in util.filechunkiter(fileobj):
530 h.update(chunk)
538 h.update(chunk)
531 return hex(h.digest())
539 return hex(h.digest())
532
540
533
541
534 def httpsendfile(ui, filename):
542 def httpsendfile(ui, filename):
535 return httpconnection.httpsendfile(ui, filename, b'rb')
543 return httpconnection.httpsendfile(ui, filename, b'rb')
536
544
537
545
538 def unixpath(path):
546 def unixpath(path):
539 '''Return a version of path normalized for use with the lfdirstate.'''
547 '''Return a version of path normalized for use with the lfdirstate.'''
540 return util.pconvert(os.path.normpath(path))
548 return util.pconvert(os.path.normpath(path))
541
549
542
550
543 def islfilesrepo(repo):
551 def islfilesrepo(repo):
544 '''Return true if the repo is a largefile repo.'''
552 '''Return true if the repo is a largefile repo.'''
545 if b'largefiles' in repo.requirements and any(
553 if b'largefiles' in repo.requirements and any(
546 shortnameslash in f[1] for f in repo.store.datafiles()
554 shortnameslash in f[1] for f in repo.store.datafiles()
547 ):
555 ):
548 return True
556 return True
549
557
550 return any(openlfdirstate(repo.ui, repo, False))
558 return any(openlfdirstate(repo.ui, repo, False))
551
559
552
560
553 class storeprotonotcapable(Exception):
561 class storeprotonotcapable(Exception):
554 def __init__(self, storetypes):
562 def __init__(self, storetypes):
555 self.storetypes = storetypes
563 self.storetypes = storetypes
556
564
557
565
558 def getstandinsstate(repo):
566 def getstandinsstate(repo):
559 standins = []
567 standins = []
560 matcher = getstandinmatcher(repo)
568 matcher = getstandinmatcher(repo)
561 wctx = repo[None]
569 wctx = repo[None]
562 for standin in repo.dirstate.walk(
570 for standin in repo.dirstate.walk(
563 matcher, subrepos=[], unknown=False, ignored=False
571 matcher, subrepos=[], unknown=False, ignored=False
564 ):
572 ):
565 lfile = splitstandin(standin)
573 lfile = splitstandin(standin)
566 try:
574 try:
567 hash = readasstandin(wctx[standin])
575 hash = readasstandin(wctx[standin])
568 except IOError:
576 except IOError:
569 hash = None
577 hash = None
570 standins.append((lfile, hash))
578 standins.append((lfile, hash))
571 return standins
579 return standins
572
580
573
581
574 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
582 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
575 lfstandin = standin(lfile)
583 lfstandin = standin(lfile)
576 if lfstandin not in repo.dirstate:
584 if lfstandin not in repo.dirstate:
577 lfdirstate.hacky_extension_update_file(
585 lfdirstate.hacky_extension_update_file(
578 lfile,
586 lfile,
579 p1_tracked=False,
587 p1_tracked=False,
580 wc_tracked=False,
588 wc_tracked=False,
581 )
589 )
582 else:
590 else:
583 entry = repo.dirstate.get_entry(lfstandin)
591 entry = repo.dirstate.get_entry(lfstandin)
584 lfdirstate.hacky_extension_update_file(
592 lfdirstate.hacky_extension_update_file(
585 lfile,
593 lfile,
586 wc_tracked=entry.tracked,
594 wc_tracked=entry.tracked,
587 p1_tracked=entry.p1_tracked,
595 p1_tracked=entry.p1_tracked,
588 p2_info=entry.p2_info,
596 p2_info=entry.p2_info,
589 possibly_dirty=True,
597 possibly_dirty=True,
590 )
598 )
591
599
592
600
593 def markcommitted(orig, ctx, node):
601 def markcommitted(orig, ctx, node):
594 repo = ctx.repo()
602 repo = ctx.repo()
595
603
596 lfdirstate = openlfdirstate(repo.ui, repo)
604 lfdirstate = openlfdirstate(repo.ui, repo)
597 with lfdirstate.changing_parents(repo):
605 with lfdirstate.changing_parents(repo):
598 orig(node)
606 orig(node)
599
607
600 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
608 # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
601 # because files coming from the 2nd parent are omitted in the latter.
609 # because files coming from the 2nd parent are omitted in the latter.
602 #
610 #
603 # The former should be used to get targets of "synclfdirstate",
611 # The former should be used to get targets of "synclfdirstate",
604 # because such files:
612 # because such files:
605 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
613 # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
606 # - have to be marked as "n" after commit, but
614 # - have to be marked as "n" after commit, but
607 # - aren't listed in "repo[node].files()"
615 # - aren't listed in "repo[node].files()"
608
616
609 for f in ctx.files():
617 for f in ctx.files():
610 lfile = splitstandin(f)
618 lfile = splitstandin(f)
611 if lfile is not None:
619 if lfile is not None:
612 synclfdirstate(repo, lfdirstate, lfile, False)
620 synclfdirstate(repo, lfdirstate, lfile, False)
613 lfdirstate.write(repo.currenttransaction())
621 lfdirstate.write(repo.currenttransaction())
614
622
615 # As part of committing, copy all of the largefiles into the cache.
623 # As part of committing, copy all of the largefiles into the cache.
616 #
624 #
617 # Using "node" instead of "ctx" implies additional "repo[node]"
625 # Using "node" instead of "ctx" implies additional "repo[node]"
618 # lookup while copyalltostore(), but can omit redundant check for
626 # lookup while copyalltostore(), but can omit redundant check for
619 # files comming from the 2nd parent, which should exist in store
627 # files comming from the 2nd parent, which should exist in store
620 # at merging.
628 # at merging.
621 copyalltostore(repo, node)
629 copyalltostore(repo, node)
622
630
623
631
624 def getlfilestoupdate(oldstandins, newstandins):
632 def getlfilestoupdate(oldstandins, newstandins):
625 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
633 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
626 filelist = []
634 filelist = []
627 for f in changedstandins:
635 for f in changedstandins:
628 if f[0] not in filelist:
636 if f[0] not in filelist:
629 filelist.append(f[0])
637 filelist.append(f[0])
630 return filelist
638 return filelist
631
639
632
640
633 def getlfilestoupload(repo, missing, addfunc):
641 def getlfilestoupload(repo, missing, addfunc):
634 makeprogress = repo.ui.makeprogress
642 makeprogress = repo.ui.makeprogress
635 with makeprogress(
643 with makeprogress(
636 _(b'finding outgoing largefiles'),
644 _(b'finding outgoing largefiles'),
637 unit=_(b'revisions'),
645 unit=_(b'revisions'),
638 total=len(missing),
646 total=len(missing),
639 ) as progress:
647 ) as progress:
640 for i, n in enumerate(missing):
648 for i, n in enumerate(missing):
641 progress.update(i)
649 progress.update(i)
642 parents = [p for p in repo[n].parents() if p != repo.nullid]
650 parents = [p for p in repo[n].parents() if p != repo.nullid]
643
651
644 with lfstatus(repo, value=False):
652 with lfstatus(repo, value=False):
645 ctx = repo[n]
653 ctx = repo[n]
646
654
647 files = set(ctx.files())
655 files = set(ctx.files())
648 if len(parents) == 2:
656 if len(parents) == 2:
649 mc = ctx.manifest()
657 mc = ctx.manifest()
650 mp1 = ctx.p1().manifest()
658 mp1 = ctx.p1().manifest()
651 mp2 = ctx.p2().manifest()
659 mp2 = ctx.p2().manifest()
652 for f in mp1:
660 for f in mp1:
653 if f not in mc:
661 if f not in mc:
654 files.add(f)
662 files.add(f)
655 for f in mp2:
663 for f in mp2:
656 if f not in mc:
664 if f not in mc:
657 files.add(f)
665 files.add(f)
658 for f in mc:
666 for f in mc:
659 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
667 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
660 files.add(f)
668 files.add(f)
661 for fn in files:
669 for fn in files:
662 if isstandin(fn) and fn in ctx:
670 if isstandin(fn) and fn in ctx:
663 addfunc(fn, readasstandin(ctx[fn]))
671 addfunc(fn, readasstandin(ctx[fn]))
664
672
665
673
666 def updatestandinsbymatch(repo, match):
674 def updatestandinsbymatch(repo, match):
667 """Update standins in the working directory according to specified match
675 """Update standins in the working directory according to specified match
668
676
669 This returns (possibly modified) ``match`` object to be used for
677 This returns (possibly modified) ``match`` object to be used for
670 subsequent commit process.
678 subsequent commit process.
671 """
679 """
672
680
673 ui = repo.ui
681 ui = repo.ui
674
682
675 # Case 1: user calls commit with no specific files or
683 # Case 1: user calls commit with no specific files or
676 # include/exclude patterns: refresh and commit all files that
684 # include/exclude patterns: refresh and commit all files that
677 # are "dirty".
685 # are "dirty".
678 if match is None or match.always():
686 if match is None or match.always():
679 # Spend a bit of time here to get a list of files we know
687 # Spend a bit of time here to get a list of files we know
680 # are modified so we can compare only against those.
688 # are modified so we can compare only against those.
681 # It can cost a lot of time (several seconds)
689 # It can cost a lot of time (several seconds)
682 # otherwise to update all standins if the largefiles are
690 # otherwise to update all standins if the largefiles are
683 # large.
691 # large.
684 lfdirstate = openlfdirstate(ui, repo)
692 lfdirstate = openlfdirstate(ui, repo)
685 dirtymatch = matchmod.always()
693 dirtymatch = matchmod.always()
686 unsure, s, mtime_boundary = lfdirstate.status(
694 unsure, s, mtime_boundary = lfdirstate.status(
687 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
695 dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
688 )
696 )
689 modifiedfiles = unsure + s.modified + s.added + s.removed
697 modifiedfiles = unsure + s.modified + s.added + s.removed
690 lfiles = listlfiles(repo)
698 lfiles = listlfiles(repo)
691 # this only loops through largefiles that exist (not
699 # this only loops through largefiles that exist (not
692 # removed/renamed)
700 # removed/renamed)
693 for lfile in lfiles:
701 for lfile in lfiles:
694 if lfile in modifiedfiles:
702 if lfile in modifiedfiles:
695 fstandin = standin(lfile)
703 fstandin = standin(lfile)
696 if repo.wvfs.exists(fstandin):
704 if repo.wvfs.exists(fstandin):
697 # this handles the case where a rebase is being
705 # this handles the case where a rebase is being
698 # performed and the working copy is not updated
706 # performed and the working copy is not updated
699 # yet.
707 # yet.
700 if repo.wvfs.exists(lfile):
708 if repo.wvfs.exists(lfile):
701 updatestandin(repo, lfile, fstandin)
709 updatestandin(repo, lfile, fstandin)
702
710
703 return match
711 return match
704
712
705 lfiles = listlfiles(repo)
713 lfiles = listlfiles(repo)
706 match._files = repo._subdirlfs(match.files(), lfiles)
714 match._files = repo._subdirlfs(match.files(), lfiles)
707
715
708 # Case 2: user calls commit with specified patterns: refresh
716 # Case 2: user calls commit with specified patterns: refresh
709 # any matching big files.
717 # any matching big files.
710 smatcher = composestandinmatcher(repo, match)
718 smatcher = composestandinmatcher(repo, match)
711 standins = repo.dirstate.walk(
719 standins = repo.dirstate.walk(
712 smatcher, subrepos=[], unknown=False, ignored=False
720 smatcher, subrepos=[], unknown=False, ignored=False
713 )
721 )
714
722
715 # No matching big files: get out of the way and pass control to
723 # No matching big files: get out of the way and pass control to
716 # the usual commit() method.
724 # the usual commit() method.
717 if not standins:
725 if not standins:
718 return match
726 return match
719
727
720 # Refresh all matching big files. It's possible that the
728 # Refresh all matching big files. It's possible that the
721 # commit will end up failing, in which case the big files will
729 # commit will end up failing, in which case the big files will
722 # stay refreshed. No harm done: the user modified them and
730 # stay refreshed. No harm done: the user modified them and
723 # asked to commit them, so sooner or later we're going to
731 # asked to commit them, so sooner or later we're going to
724 # refresh the standins. Might as well leave them refreshed.
732 # refresh the standins. Might as well leave them refreshed.
725 lfdirstate = openlfdirstate(ui, repo)
733 lfdirstate = openlfdirstate(ui, repo)
726 for fstandin in standins:
734 for fstandin in standins:
727 lfile = splitstandin(fstandin)
735 lfile = splitstandin(fstandin)
728 if lfdirstate.get_entry(lfile).tracked:
736 if lfdirstate.get_entry(lfile).tracked:
729 updatestandin(repo, lfile, fstandin)
737 updatestandin(repo, lfile, fstandin)
730
738
731 # Cook up a new matcher that only matches regular files or
739 # Cook up a new matcher that only matches regular files or
732 # standins corresponding to the big files requested by the
740 # standins corresponding to the big files requested by the
733 # user. Have to modify _files to prevent commit() from
741 # user. Have to modify _files to prevent commit() from
734 # complaining "not tracked" for big files.
742 # complaining "not tracked" for big files.
735 match = copy.copy(match)
743 match = copy.copy(match)
736 origmatchfn = match.matchfn
744 origmatchfn = match.matchfn
737
745
738 # Check both the list of largefiles and the list of
746 # Check both the list of largefiles and the list of
739 # standins because if a largefile was removed, it
747 # standins because if a largefile was removed, it
740 # won't be in the list of largefiles at this point
748 # won't be in the list of largefiles at this point
741 match._files += sorted(standins)
749 match._files += sorted(standins)
742
750
743 actualfiles = []
751 actualfiles = []
744 for f in match._files:
752 for f in match._files:
745 fstandin = standin(f)
753 fstandin = standin(f)
746
754
747 # For largefiles, only one of the normal and standin should be
755 # For largefiles, only one of the normal and standin should be
748 # committed (except if one of them is a remove). In the case of a
756 # committed (except if one of them is a remove). In the case of a
749 # standin removal, drop the normal file if it is unknown to dirstate.
757 # standin removal, drop the normal file if it is unknown to dirstate.
750 # Thus, skip plain largefile names but keep the standin.
758 # Thus, skip plain largefile names but keep the standin.
751 if f in lfiles or fstandin in standins:
759 if f in lfiles or fstandin in standins:
752 if not repo.dirstate.get_entry(fstandin).removed:
760 if not repo.dirstate.get_entry(fstandin).removed:
753 if not repo.dirstate.get_entry(f).removed:
761 if not repo.dirstate.get_entry(f).removed:
754 continue
762 continue
755 elif not repo.dirstate.get_entry(f).any_tracked:
763 elif not repo.dirstate.get_entry(f).any_tracked:
756 continue
764 continue
757
765
758 actualfiles.append(f)
766 actualfiles.append(f)
759 match._files = actualfiles
767 match._files = actualfiles
760
768
761 def matchfn(f):
769 def matchfn(f):
762 if origmatchfn(f):
770 if origmatchfn(f):
763 return f not in lfiles
771 return f not in lfiles
764 else:
772 else:
765 return f in standins
773 return f in standins
766
774
767 match.matchfn = matchfn
775 match.matchfn = matchfn
768
776
769 return match
777 return match
770
778
771
779
772 class automatedcommithook:
780 class automatedcommithook:
773 """Stateful hook to update standins at the 1st commit of resuming
781 """Stateful hook to update standins at the 1st commit of resuming
774
782
775 For efficiency, updating standins in the working directory should
783 For efficiency, updating standins in the working directory should
776 be avoided while automated committing (like rebase, transplant and
784 be avoided while automated committing (like rebase, transplant and
777 so on), because they should be updated before committing.
785 so on), because they should be updated before committing.
778
786
779 But the 1st commit of resuming automated committing (e.g. ``rebase
787 But the 1st commit of resuming automated committing (e.g. ``rebase
780 --continue``) should update them, because largefiles may be
788 --continue``) should update them, because largefiles may be
781 modified manually.
789 modified manually.
782 """
790 """
783
791
784 def __init__(self, resuming):
792 def __init__(self, resuming):
785 self.resuming = resuming
793 self.resuming = resuming
786
794
787 def __call__(self, repo, match):
795 def __call__(self, repo, match):
788 if self.resuming:
796 if self.resuming:
789 self.resuming = False # avoids updating at subsequent commits
797 self.resuming = False # avoids updating at subsequent commits
790 return updatestandinsbymatch(repo, match)
798 return updatestandinsbymatch(repo, match)
791 else:
799 else:
792 return match
800 return match
793
801
794
802
795 def getstatuswriter(ui, repo, forcibly=None):
803 def getstatuswriter(ui, repo, forcibly=None):
796 """Return the function to write largefiles specific status out
804 """Return the function to write largefiles specific status out
797
805
798 If ``forcibly`` is ``None``, this returns the last element of
806 If ``forcibly`` is ``None``, this returns the last element of
799 ``repo._lfstatuswriters`` as "default" writer function.
807 ``repo._lfstatuswriters`` as "default" writer function.
800
808
801 Otherwise, this returns the function to always write out (or
809 Otherwise, this returns the function to always write out (or
802 ignore if ``not forcibly``) status.
810 ignore if ``not forcibly``) status.
803 """
811 """
804 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
812 if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
805 return repo._lfstatuswriters[-1]
813 return repo._lfstatuswriters[-1]
806 else:
814 else:
807 if forcibly:
815 if forcibly:
808 return ui.status # forcibly WRITE OUT
816 return ui.status # forcibly WRITE OUT
809 else:
817 else:
810 return lambda *msg, **opts: None # forcibly IGNORE
818 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,1867 +1,1890 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import contextlib
11 import copy
12 import copy
12 import os
13 import os
13
14
14 from mercurial.i18n import _
15 from mercurial.i18n import _
15
16
16 from mercurial.pycompat import open
17 from mercurial.pycompat import open
17
18
18 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
19
20
20 from mercurial import (
21 from mercurial import (
21 archival,
22 archival,
22 cmdutil,
23 cmdutil,
23 copies as copiesmod,
24 copies as copiesmod,
25 dirstate,
24 error,
26 error,
25 exchange,
27 exchange,
26 extensions,
28 extensions,
27 exthelper,
29 exthelper,
28 filemerge,
30 filemerge,
29 hg,
31 hg,
30 logcmdutil,
32 logcmdutil,
31 match as matchmod,
33 match as matchmod,
32 merge,
34 merge,
33 mergestate as mergestatemod,
35 mergestate as mergestatemod,
34 pathutil,
36 pathutil,
35 pycompat,
37 pycompat,
36 scmutil,
38 scmutil,
37 smartset,
39 smartset,
38 subrepo,
40 subrepo,
39 url as urlmod,
41 url as urlmod,
40 util,
42 util,
41 )
43 )
42
44
43 from mercurial.upgrade_utils import (
45 from mercurial.upgrade_utils import (
44 actions as upgrade_actions,
46 actions as upgrade_actions,
45 )
47 )
46
48
47 from . import (
49 from . import (
48 lfcommands,
50 lfcommands,
49 lfutil,
51 lfutil,
50 storefactory,
52 storefactory,
51 )
53 )
52
54
53 ACTION_ADD = mergestatemod.ACTION_ADD
55 ACTION_ADD = mergestatemod.ACTION_ADD
54 ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
56 ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
55 ACTION_GET = mergestatemod.ACTION_GET
57 ACTION_GET = mergestatemod.ACTION_GET
56 ACTION_KEEP = mergestatemod.ACTION_KEEP
58 ACTION_KEEP = mergestatemod.ACTION_KEEP
57 ACTION_REMOVE = mergestatemod.ACTION_REMOVE
59 ACTION_REMOVE = mergestatemod.ACTION_REMOVE
58
60
59 eh = exthelper.exthelper()
61 eh = exthelper.exthelper()
60
62
61 lfstatus = lfutil.lfstatus
63 lfstatus = lfutil.lfstatus
62
64
63 MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
65 MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
64
66
65 # -- Utility functions: commonly/repeatedly needed functionality ---------------
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
66
68
67
69
68 def composelargefilematcher(match, manifest):
70 def composelargefilematcher(match, manifest):
69 """create a matcher that matches only the largefiles in the original
71 """create a matcher that matches only the largefiles in the original
70 matcher"""
72 matcher"""
71 m = copy.copy(match)
73 m = copy.copy(match)
72 lfile = lambda f: lfutil.standin(f) in manifest
74 lfile = lambda f: lfutil.standin(f) in manifest
73 m._files = [lf for lf in m._files if lfile(lf)]
75 m._files = [lf for lf in m._files if lfile(lf)]
74 m._fileset = set(m._files)
76 m._fileset = set(m._files)
75 m.always = lambda: False
77 m.always = lambda: False
76 origmatchfn = m.matchfn
78 origmatchfn = m.matchfn
77 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
79 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
78 return m
80 return m
79
81
80
82
81 def composenormalfilematcher(match, manifest, exclude=None):
83 def composenormalfilematcher(match, manifest, exclude=None):
82 excluded = set()
84 excluded = set()
83 if exclude is not None:
85 if exclude is not None:
84 excluded.update(exclude)
86 excluded.update(exclude)
85
87
86 m = copy.copy(match)
88 m = copy.copy(match)
87 notlfile = lambda f: not (
89 notlfile = lambda f: not (
88 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
90 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
89 )
91 )
90 m._files = [lf for lf in m._files if notlfile(lf)]
92 m._files = [lf for lf in m._files if notlfile(lf)]
91 m._fileset = set(m._files)
93 m._fileset = set(m._files)
92 m.always = lambda: False
94 m.always = lambda: False
93 origmatchfn = m.matchfn
95 origmatchfn = m.matchfn
94 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
96 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
95 return m
97 return m
96
98
97
99
98 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
100 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
99 large = opts.get('large')
101 large = opts.get('large')
100 lfsize = lfutil.getminsize(
102 lfsize = lfutil.getminsize(
101 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
103 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
102 )
104 )
103
105
104 lfmatcher = None
106 lfmatcher = None
105 if lfutil.islfilesrepo(repo):
107 if lfutil.islfilesrepo(repo):
106 lfpats = ui.configlist(lfutil.longname, b'patterns')
108 lfpats = ui.configlist(lfutil.longname, b'patterns')
107 if lfpats:
109 if lfpats:
108 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
110 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
109
111
110 lfnames = []
112 lfnames = []
111 m = matcher
113 m = matcher
112
114
113 wctx = repo[None]
115 wctx = repo[None]
114 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
116 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
115 exact = m.exact(f)
117 exact = m.exact(f)
116 lfile = lfutil.standin(f) in wctx
118 lfile = lfutil.standin(f) in wctx
117 nfile = f in wctx
119 nfile = f in wctx
118 exists = lfile or nfile
120 exists = lfile or nfile
119
121
120 # Don't warn the user when they attempt to add a normal tracked file.
122 # Don't warn the user when they attempt to add a normal tracked file.
121 # The normal add code will do that for us.
123 # The normal add code will do that for us.
122 if exact and exists:
124 if exact and exists:
123 if lfile:
125 if lfile:
124 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
126 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
125 continue
127 continue
126
128
127 if (exact or not exists) and not lfutil.isstandin(f):
129 if (exact or not exists) and not lfutil.isstandin(f):
128 # In case the file was removed previously, but not committed
130 # In case the file was removed previously, but not committed
129 # (issue3507)
131 # (issue3507)
130 if not repo.wvfs.exists(f):
132 if not repo.wvfs.exists(f):
131 continue
133 continue
132
134
133 abovemin = (
135 abovemin = (
134 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
136 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
135 )
137 )
136 if large or abovemin or (lfmatcher and lfmatcher(f)):
138 if large or abovemin or (lfmatcher and lfmatcher(f)):
137 lfnames.append(f)
139 lfnames.append(f)
138 if ui.verbose or not exact:
140 if ui.verbose or not exact:
139 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
141 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
140
142
141 bad = []
143 bad = []
142
144
143 # Need to lock, otherwise there could be a race condition between
145 # Need to lock, otherwise there could be a race condition between
144 # when standins are created and added to the repo.
146 # when standins are created and added to the repo.
145 with repo.wlock():
147 with repo.wlock():
146 if not opts.get('dry_run'):
148 if not opts.get('dry_run'):
147 standins = []
149 standins = []
148 lfdirstate = lfutil.openlfdirstate(ui, repo)
150 lfdirstate = lfutil.openlfdirstate(ui, repo)
149 for f in lfnames:
151 for f in lfnames:
150 standinname = lfutil.standin(f)
152 standinname = lfutil.standin(f)
151 lfutil.writestandin(
153 lfutil.writestandin(
152 repo,
154 repo,
153 standinname,
155 standinname,
154 hash=b'',
156 hash=b'',
155 executable=lfutil.getexecutable(repo.wjoin(f)),
157 executable=lfutil.getexecutable(repo.wjoin(f)),
156 )
158 )
157 standins.append(standinname)
159 standins.append(standinname)
158 lfdirstate.set_tracked(f)
160 lfdirstate.set_tracked(f)
159 lfdirstate.write(repo.currenttransaction())
161 lfdirstate.write(repo.currenttransaction())
160 bad += [
162 bad += [
161 lfutil.splitstandin(f)
163 lfutil.splitstandin(f)
162 for f in repo[None].add(standins)
164 for f in repo[None].add(standins)
163 if f in m.files()
165 if f in m.files()
164 ]
166 ]
165
167
166 added = [f for f in lfnames if f not in bad]
168 added = [f for f in lfnames if f not in bad]
167 return added, bad
169 return added, bad
168
170
169
171
170 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
172 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
171 after = opts.get('after')
173 after = opts.get('after')
172 m = composelargefilematcher(matcher, repo[None].manifest())
174 m = composelargefilematcher(matcher, repo[None].manifest())
173 with lfstatus(repo):
175 with lfstatus(repo):
174 s = repo.status(match=m, clean=not isaddremove)
176 s = repo.status(match=m, clean=not isaddremove)
175 manifest = repo[None].manifest()
177 manifest = repo[None].manifest()
176 modified, added, deleted, clean = [
178 modified, added, deleted, clean = [
177 [f for f in list if lfutil.standin(f) in manifest]
179 [f for f in list if lfutil.standin(f) in manifest]
178 for list in (s.modified, s.added, s.deleted, s.clean)
180 for list in (s.modified, s.added, s.deleted, s.clean)
179 ]
181 ]
180
182
181 def warn(files, msg):
183 def warn(files, msg):
182 for f in files:
184 for f in files:
183 ui.warn(msg % uipathfn(f))
185 ui.warn(msg % uipathfn(f))
184 return int(len(files) > 0)
186 return int(len(files) > 0)
185
187
186 if after:
188 if after:
187 remove = deleted
189 remove = deleted
188 result = warn(
190 result = warn(
189 modified + added + clean, _(b'not removing %s: file still exists\n')
191 modified + added + clean, _(b'not removing %s: file still exists\n')
190 )
192 )
191 else:
193 else:
192 remove = deleted + clean
194 remove = deleted + clean
193 result = warn(
195 result = warn(
194 modified,
196 modified,
195 _(
197 _(
196 b'not removing %s: file is modified (use -f'
198 b'not removing %s: file is modified (use -f'
197 b' to force removal)\n'
199 b' to force removal)\n'
198 ),
200 ),
199 )
201 )
200 result = (
202 result = (
201 warn(
203 warn(
202 added,
204 added,
203 _(
205 _(
204 b'not removing %s: file has been marked for add'
206 b'not removing %s: file has been marked for add'
205 b' (use forget to undo)\n'
207 b' (use forget to undo)\n'
206 ),
208 ),
207 )
209 )
208 or result
210 or result
209 )
211 )
210
212
211 # Need to lock because standin files are deleted then removed from the
213 # Need to lock because standin files are deleted then removed from the
212 # repository and we could race in-between.
214 # repository and we could race in-between.
213 with repo.wlock():
215 with repo.wlock():
214 lfdirstate = lfutil.openlfdirstate(ui, repo)
216 lfdirstate = lfutil.openlfdirstate(ui, repo)
215 for f in sorted(remove):
217 for f in sorted(remove):
216 if ui.verbose or not m.exact(f):
218 if ui.verbose or not m.exact(f):
217 ui.status(_(b'removing %s\n') % uipathfn(f))
219 ui.status(_(b'removing %s\n') % uipathfn(f))
218
220
219 if not dryrun:
221 if not dryrun:
220 if not after:
222 if not after:
221 repo.wvfs.unlinkpath(f, ignoremissing=True)
223 repo.wvfs.unlinkpath(f, ignoremissing=True)
222
224
223 if dryrun:
225 if dryrun:
224 return result
226 return result
225
227
226 remove = [lfutil.standin(f) for f in remove]
228 remove = [lfutil.standin(f) for f in remove]
227 # If this is being called by addremove, let the original addremove
229 # If this is being called by addremove, let the original addremove
228 # function handle this.
230 # function handle this.
229 if not isaddremove:
231 if not isaddremove:
230 for f in remove:
232 for f in remove:
231 repo.wvfs.unlinkpath(f, ignoremissing=True)
233 repo.wvfs.unlinkpath(f, ignoremissing=True)
232 repo[None].forget(remove)
234 repo[None].forget(remove)
233
235
234 for f in remove:
236 for f in remove:
235 lfdirstate.set_untracked(lfutil.splitstandin(f))
237 lfdirstate.set_untracked(lfutil.splitstandin(f))
236
238
237 lfdirstate.write(repo.currenttransaction())
239 lfdirstate.write(repo.currenttransaction())
238
240
239 return result
241 return result
240
242
241
243
242 # For overriding mercurial.hgweb.webcommands so that largefiles will
244 # For overriding mercurial.hgweb.webcommands so that largefiles will
243 # appear at their right place in the manifests.
245 # appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    """Map a standin path back to the largefile name it represents (hgweb)."""
    stripped = lfutil.splitstandin(path)
    # Fall back to the path itself when it is not a standin.
    return stripped if stripped else path
247
249
248
250
249 # -- Wrappers: modify existing commands --------------------------------
251 # -- Wrappers: modify existing commands --------------------------------
250
252
251
253
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrap 'hg add', rejecting the mutually exclusive --normal/--large pair."""
    wants_normal = opts.get('normal')
    wants_large = opts.get('large')
    if wants_normal and wants_large:
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
272
274
273
275
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Wrap cmdutil.add so matching largefiles are added via their standins."""
    # --normal means "treat everything as a plain file": defer entirely.
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    # Add the largefiles first (creating standins), then let the original
    # add handle whatever normal files remain.
    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)

    # Report failures from both passes.
    bad.extend(lbad)
    return bad
288
290
289
291
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Wrap cmdutil.remove so matching largefiles are removed as well."""
    # Run the original removal restricted to normal files...
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normal_result = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    # ...then remove the largefiles; a failure in either pass is reported.
    large_result = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return large_result or normal_result
312
314
313
315
@eh.wrapfunction(dirstate.dirstate, b'_changing')
@contextlib.contextmanager
def _changing(orig, self, repo, change_type):
    """Propagate a core dirstate "changing" context to the lfdirstate.

    Whenever the main dirstate enters a change context, open (or reuse) the
    largefiles dirstate and enter the same kind of change context on it, so
    both dirstates agree on when a change is in progress.
    """
    # Remember whatever sub-dirstate was attached before we entered, so a
    # nested invocation restores it on exit.
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        # The lfdirstate itself carries `_large_file_dirstate`; never attach
        # a sub-dirstate to it, or we would recurse into ourselves.
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo, change_type):
            if sub_dirstate is None:
                yield
            else:
                # Mirror the change context on the largefiles dirstate.
                with sub_dirstate._changing(repo, change_type):
                    yield
    finally:
        # Restore the previously attached sub-dirstate (possibly None).
        self._sub_dirstate = pre
335
336
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefile reporting enabled."""
    inner_repo = repo._repo
    with lfstatus(inner_repo):
        return orig(repo, rev2, **opts)
318
341
319
342
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefile reporting enabled."""
    status_ctx = lfstatus(repo)
    with status_ctx:
        return orig(ui, repo, *pats, **opts)
324
347
325
348
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    """Check subrepo dirtiness with largefile reporting enabled."""
    inner_repo = repo._repo
    with lfstatus(inner_repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
330
353
331
354
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrap 'hg log' so user patterns also match the corresponding standins."""

    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Filesets are left untouched; only plain/kinded patterns are
            # rewritten to their standin equivalent.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Accept a path if either its largefile name (standin stripped)
            # or the path itself satisfies the original matcher.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Build the diff file matcher from the unmodified matchandpats so
        # diffs are shown for the names the user actually typed.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
451
474
452
475
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrap 'hg verify' so it can additionally verify largefiles."""
    # Pop our extension flags before handing the options to the original.
    check_large = opts.pop('large', False)
    check_all_revs = opts.pop('lfa', False)
    check_contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    # Any of the largefile flags triggers the largefile verification pass.
    if check_large or check_all_revs or check_contents:
        result = result or lfcommands.verifylfiles(
            ui, repo, check_all_revs, check_contents
        )
    return result
485
508
486
509
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrap 'hg debugstate' to optionally dump the largefiles dirstate."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return

    # Hand the original command an object whose `dirstate` attribute is the
    # lfdirstate, so it dumps that instead of the main dirstate.
    class fakerepo:
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
501
524
502
525
503 # Before starting the manifest merge, merge.updates will call
526 # Before starting the manifest merge, merge.updates will call
504 # _checkunknownfile to check if there are any files in the merged-in
527 # _checkunknownfile to check if there are any files in the merged-in
505 # changeset that collide with unknown files in the working copy.
528 # changeset that collide with unknown files in the working copy.
506 #
529 #
507 # The largefiles are seen as unknown, so this prevents us from merging
530 # The largefiles are seen as unknown, so this prevents us from merging
508 # in a file 'foo' if we already have a largefile with the same name.
531 # in a file 'foo' if we already have a largefile with the same name.
509 #
532 #
510 # The overridden function filters the unknown files by removing any
533 # The overridden function filters the unknown files by removing any
511 # largefiles. This makes the merge proceed and we can then handle this
534 # largefiles. This makes the merge proceed and we can then handle this
512 # case further in the overridden calculateupdates function below.
535 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(
    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
):
    """Skip the unknown-file collision check for tracked largefiles."""
    normalized = dirstate.normalize(f)
    # A largefile in the working copy shows up as "unknown" to the core
    # check; if its standin is tracked, it is not a real collision.
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
520
543
521
544
522 # The manifest merge handles conflicts on the manifest level. We want
545 # The manifest merge handles conflicts on the manifest level. We want
523 # to handle changes in largefile-ness of files at this level too.
546 # to handle changes in largefile-ness of files at this level too.
524 #
547 #
525 # The strategy is to run the original calculateupdates and then process
548 # The strategy is to run the original calculateupdates and then process
526 # the action list it outputs. There are two cases we need to deal with:
549 # the action list it outputs. There are two cases we need to deal with:
527 #
550 #
528 # 1. Normal file in p1, largefile in p2. Here the largefile is
551 # 1. Normal file in p1, largefile in p2. Here the largefile is
529 # detected via its standin file, which will enter the working copy
552 # detected via its standin file, which will enter the working copy
530 # with a "get" action. It is not "merge" since the standin is all
553 # with a "get" action. It is not "merge" since the standin is all
531 # Mercurial is concerned with at this level -- the link to the
554 # Mercurial is concerned with at this level -- the link to the
532 # existing normal file is not relevant here.
555 # existing normal file is not relevant here.
533 #
556 #
534 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
557 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
535 # since the largefile will be present in the working copy and
558 # since the largefile will be present in the working copy and
536 # different from the normal file in p2. Mercurial therefore
559 # different from the normal file in p2. Mercurial therefore
537 # triggers a merge action.
560 # triggers a merge action.
538 #
561 #
539 # In both cases, we prompt the user and emit new actions to either
562 # In both cases, we prompt the user and emit new actions to either
540 # remove the standin (if the normal file was kept) or to remove the
563 # remove the standin (if the normal file was kept) or to remove the
541 # normal file and get the standin (if the largefile was kept). The
564 # normal file and get the standin (if the largefile was kept). The
542 # default prompt answer is to use the largefile version since it was
565 # default prompt answer is to use the largefile version since it was
543 # presumably changed on purpose.
566 # presumably changed on purpose.
544 #
567 #
545 # Finally, the merge.applyupdates function will then take care of
568 # Finally, the merge.applyupdates function will then take care of
546 # writing the files into the working copy and lfcommands.updatelfiles
569 # writing the files into the working copy and lfcommands.updatelfiles
547 # will update the largefiles.
570 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions for files changing largefile-ness.

    After the original calculateupdates, detect files that are a largefile
    on one side and a normal file on the other, prompt the user, and rewrite
    the action list so that either the standin or the normal file wins (see
    the module-level comment above for the two cases handled).
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        # Plain overwrite (no branch merge): nothing to arbitrate.
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                # Rebuild "get"-style args from the deleted/changed tuple.
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                # Rebuild "get"-style args from the deleted/changed tuple.
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
657
680
658
681
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record largefile "mark removed" merge actions in both dirstates.

    For each file scheduled as MERGE_ACTION_LARGEFILE_MARK_REMOVED, drop it
    from the main dirstate's working copy while re-adding it to the
    lfdirstate, then delegate the remaining actions to the original.
    """
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.changing_parents(repo):
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)
677
700
678
701
679 # Override filemerge to prompt the user about how they wish to merge
702 # Override filemerge to prompt the user about how they wish to merge
680 # largefiles. This will handle identical edits without prompting the user.
703 # largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge largefile standins by comparing their hashes.

    Non-standins (and absent sides) are delegated to the original filemerge.
    Identical edits are resolved silently; a real conflict prompts the user
    to keep the local or take the other largefile.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    # Standins hold the largefile hash; lowercase for comparison.
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    # Take the other side when it differs from both ancestor and local, and
    # either the local side is unchanged from the ancestor (fast-forward) or
    # the user explicitly picks "other" at the prompt.
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    # Report success with no further merge work needed.
    return 0, False
711
734
712
735
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate standin names back to largefile names in copy tracking."""
    copies = orig(ctx1, ctx2, match=match)
    # Replace each side of every copy pair with its largefile name when the
    # path is a standin; leave ordinary paths untouched.
    return {
        lfutil.splitstandin(src) or src: lfutil.splitstandin(dst) or dst
        for src, dst in copies.items()
    }
722
745
723
746
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename both the standins and the largefiles they stand for.

    Runs the wrapped ``cmdutil.copy`` twice: once restricted to normal
    files, once restricted to standins (with the matcher rewritten so
    user-supplied largefile paths resolve to their standins), then
    mirrors each copied standin by copying/renaming the largefile
    itself and updating the largefiles dirstate.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        #
        # Use the context-manager form: the previous code assigned
        # ``wlock = repo.wlock()`` inside the ``try`` and released it in a
        # ``finally``, which would raise NameError if wlock() itself failed.
        with repo.wlock():
            manifest = repo[None].manifest()

            def overridematch(
                orig,
                ctx,
                pats=(),
                opts=None,
                globbed=False,
                default=b'relpath',
                badfn=None,
            ):
                if opts is None:
                    opts = {}
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, b''))
                    else:
                        newpats.append(pat)
                match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fileset = set(m._files)
                origmatchfn = m.matchfn

                def matchfn(f):
                    lfile = lfutil.splitstandin(f)
                    return (
                        lfile is not None
                        and (f in manifest)
                        and origmatchfn(lfile)
                        or None
                    )

                m.matchfn = matchfn
                return m

            listpats = []
            for pat in pats:
                if matchmod.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            copiedfiles = []

            def overridecopyfile(orig, src, dest, *args, **kwargs):
                if lfutil.shortname in src and dest.startswith(
                    repo.wjoin(lfutil.shortname)
                ):
                    destlfile = dest.replace(lfutil.shortname, b'')
                    if not opts[b'force'] and os.path.exists(destlfile):
                        raise IOError(
                            b'', _(b'destination largefile already exists')
                        )
                copiedfiles.append((src, dest))
                orig(src, dest, *args, **kwargs)

            with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
                with extensions.wrappedfunction(scmutil, b'match', overridematch):
                    result += orig(ui, repo, listpats, opts, rename)

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if lfutil.shortname in src and dest.startswith(
                    repo.wjoin(lfutil.shortname)
                ):
                    srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                    destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                        lfdirstate.set_untracked(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        lfdirstate.set_tracked(destlfile)
            lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
890
913
891
914
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        # Refresh standins for locally-modified largefiles and drop the
        # standins of locally-deleted ones before the real revert runs.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    # known largefile: match on its standin instead
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    # largefile only in working copy, or already removed
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
977
1000
978
1001
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap ``pull`` so largefiles for the requested revisions are cached."""
    revsprepull = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull  # for pulled() revset expression
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            # attribute is only meaningful while pulled() is evaluated
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1021
1044
1022
1045
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        # forwarded to the push operation via exchangepushoperation()
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
1042
1065
1043
1066
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    # pop before delegating: the wrapped constructor does not know 'lfrevs'
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop
1051
1074
1052
1075
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that have just been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull --lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull --lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set by overridepull() while --lfrev
    # revsets are being evaluated; elsewhere the predicate is invalid.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1078
1101
1079
1102
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles clones to non-local destinations."""
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % destpath
        )

    return orig(ui, source, dest, **opts)
1102
1125
1103
1126
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """After a clone, optionally download every largefile version."""
    result = orig(ui, opts, *args, **kwargs)

    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get(b'all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo)

        if missing != 0:
            # signal failure to the caller when largefiles are missing
            return None

    return result
1128
1151
1129
1152
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Run rebase with largefiles commit hooks installed and in-memory off."""
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # silence per-file largefiles status output during the rebase
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        # in-memory rebase bypasses the working directory, which largefiles
        # relies on, so force it off for this invocation
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1146
1169
1147
1170
@eh.extsetup
def overriderebase(ui):
    """If the rebase extension is loaded, force its non-in-memory path."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled; nothing to wrap
        pass
    else:

        def _dorebase(orig, *args, **kwargs):
            kwargs['inmemory'] = False
            return orig(*args, **kwargs)

        extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1161
1184
1162
1185
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run archive with largefiles status enabled on the unfiltered repo."""
    unfiltered = repo.unfiltered()
    with lfstatus(unfiltered):
        return orig(ui, unfiltered, dest, **opts)
1167
1190
1168
1191
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """Enable largefiles status for hgweb archive requests."""
    with lfstatus(web.repo):
        return orig(web)
1173
1196
1174
1197
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive largefile contents in place of their standins.

    Mirrors ``archival.archive`` but, for every standin in the archived
    context, writes the actual largefile data (looked up via the repo
    store or system cache) under the largefile's name.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # honor the caller's matcher and optional keyword decoding
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if node is not None:
                # standin content is the largefile hash; resolve it to a
                # local path in the store or system cache
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1270
1293
1271
1294
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo, substituting largefile contents for standins.

    Falls through to the wrapped implementation unless the subrepo has
    largefiles enabled and lfstatus is set by the parent.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    # Pre-populate the cache so every largefile is available locally.
    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # By the time this runs, the standin name has been rewritten to the
        # largefile name, so the plain matcher suffices (no lfutil variants).
        if match and not match(f):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        flags = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                # Committed revision: resolve the hash inside the standin to
                # an on-disk largefile (store or user cache).
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                # Working copy: the largefile itself is on disk.
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, 0o755 if b'x' in flags else 0o644, b'l' in flags, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current
        # scheme where the parent sets this for the child.
        if util.safehasattr(sub, '_repo'):
            ctxmgr = lfstatus(sub._repo)
        else:
            ctxmgr = util.nullcontextmanager()
        with ctxmgr:
            sub.archive(archiver, subprefix, submatch, decode)
1334
1357
1335
1358
# A modified largefile is not reflected in its standin until commit, so
# cmdutil.bailifchanged() alone cannot see it. Wrap it to additionally run a
# largefiles-aware status check. Used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort if the working copy has changes, including largefile changes."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        s = repo.status()
    if any([s.modified, s.added, s.removed, s.deleted]):
        raise error.Abort(_(b'uncommitted changes'))
1347
1370
1348
1371
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Run the wrapped post-commit status with largefile status enabled."""
    with lfstatus(repo):
        return orig(repo, *args, **kwargs)
1353
1376
1354
1377
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Forget normal files via the wrapped command, then forget largefiles.

    Returns the combined (bad, forgot) lists from both passes.
    """
    # First, let the original command handle everything that is not a
    # largefile.
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    lfmatcher = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=lfmatcher, clean=True)
    manifest = repo[None].manifest()
    candidates = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in candidates if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not lfmatcher.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Hold the wlock: standins are deleted and then removed from the
    # repository, and another writer could race in between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write(repo.currenttransaction())
        standins = [lfutil.standin(f) for f in forget]
        for standin in standins:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in lfmatcher.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1405
1428
1406
1429
def _getoutgoings(repo, other, missing, addfunc):
    """Feed 'addfunc' with (filename, largefile hash) pairs from outgoing
    revisions in 'missing'.

    Pairs whose largefile already exists on the 'other' repository are
    skipped; each unique pair is reported at most once.
    """
    seen = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        pair = (fn, lfhash)
        if pair not in seen:
            seen.add(pair)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in seen:
            # only report hashes absent from "other"
            if not lfexists[lfhash]:
                addfunc(fn, lfhash)
1431
1454
1432
1455
def outgoinghook(ui, repo, other, opts, missing):
    """Report outgoing largefiles when 'hg outgoing --large' was given.

    In debug mode the individual hashes per file are printed as well.
    """
    if opts.pop(b'large', None):
        lfhashes = set()
        if ui.debugflag:
            # Remember every hash per filename so we can show them.
            toupload = {}

            def addfunc(fn, lfhash):
                # pytype: disable=unsupported-operands
                toupload.setdefault(fn, []).append(lfhash)
                # pytype: enable=unsupported-operands
                lfhashes.add(lfhash)

            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(b' %s\n' % lfhash)

        else:
            # Only filenames are needed.
            toupload = set()

            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)

            def showhashes(fn):
                pass

        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_(b'largefiles: no files to upload\n'))
        else:
            ui.status(
                _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
            )
            for fn in sorted(toupload):
                ui.status(lfutil.splitstandin(fn) + b'\n')
                showhashes(fn)
            ui.status(b'\n')
1471
1494
1472
1495
@eh.wrapcommand(
    b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def _outgoingcmd(orig, *args, **kwargs):
    """Pass through to the original 'outgoing'.

    The wrapper only exists to register the --large option; outgoinghook
    (registered elsewhere) does the actual work.
    """
    return orig(*args, **kwargs)
1480
1503
1481
1504
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --remote' hook reporting largefiles pending upload.

    When called with changes=None, answer which remote checks are needed
    as an (incoming, outgoing) boolean pair.
    """
    largeopt = opts.get(b'large', False)
    if changes is None:
        # only the outgoing check is needed, and only with --large
        return (False, bool(largeopt))
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()

        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)

        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_(b'largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(
                _(b'largefiles: %d entities for %d files to upload\n')
                % (len(lfhashes), len(toupload))
            )
1514
1537
1515
1538
@eh.wrapcommand(
    b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
)
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefile status enabled."""
    with lfstatus(repo):
        orig(ui, repo, *pats, **opts)
1522
1545
1523
1546
@eh.wrapfunction(scmutil, b'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
    """addremove that is aware of largefiles.

    Removes missing largefiles and adds new ones itself, then delegates
    every remaining (normal) file to the wrapped addremove.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, uipathfn, opts)
    # Find out which largefiles are missing so they can be removed.
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s, mtime_boundary = lfdirstate.status(
        matchmod.always(),
        subrepos=[],
        ignored=False,
        clean=False,
        unknown=False,
    )

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        patched = copy.copy(matcher)

        # m._files / m._map are left untouched on purpose: they drive
        # m.exact(), which governs whether and how the file name is printed.
        # Only narrow the match function to the deleted files.
        origmatchfn = patched.matchfn
        patched.matchfn = lambda f: f in s.deleted and origmatchfn(f)

        removelargefiles(
            repo.ui,
            repo,
            True,
            patched,
            uipathfn,
            opts.get(b'dry_run'),
            **pycompat.strkwargs(opts)
        )
    # Call into the normal add code; files that *should* become largefiles
    # will be added as such.
    added, bad = addlargefiles(
        repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
    )
    # Hand the rest to the original addremove, with a matcher that makes it
    # ignore the largefiles we just processed.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, uipathfn, opts)
1573
1596
1574
1597
# Calling purge with --all would delete the largefiles themselves.
# Override repo.status for the duration of the command to prevent that.
@eh.wrapcommand(b'purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run 'hg purge' with a status that hides tracked largefiles.

    Largefiles tracked only in the largefiles dirstate would otherwise show
    up as unknown/ignored and be deleted by purge.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status

    def overridestatus(
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        r = oldstatus(
            node1, node2, match, ignored, clean, unknown, listsubrepos
        )
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Drop files the largefiles dirstate tracks from the unknown and
        # ignored lists so purge does not delete them.
        unknown = [
            f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
        ]
        ignored = [
            f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
        ]
        return scmutil.status(
            r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
        )

    repo.status = overridestatus
    # Restore the original status method even if purge raises; otherwise the
    # repo object would keep the filtered status for subsequent operations.
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        repo.status = oldstatus
1617
1640
1618
1641
@eh.wrapcommand(b'rollback')
def overriderollback(orig, ui, repo, **opts):
    """Roll back and restore standin files to match the new parents."""
    with repo.wlock():
        before = repo.dirstate.parents()
        # Standins currently tracked; any still left over after the rollback
        # must have been created by the rolled-back transaction.
        orphans = {
            f
            for f in repo.dirstate
            if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
        }
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            # parents unchanged: nothing to restore
            return result

        pctx = repo[b'.']
        for f in repo.dirstate:
            if not lfutil.isstandin(f):
                continue
            orphans.discard(f)
            if repo.dirstate.get_entry(f).removed:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            elif f in pctx:
                fctx = pctx[f]
                repo.wwrite(f, fctx.data(), fctx.flags())
            else:
                # content of standin is not so important in 'a',
                # 'm' or 'n' (coming from the 2nd parent) cases
                lfutil.writestandin(repo, f, b'', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

    return result
1650
1673
1651
1674
@eh.wrapcommand(b'transplant', extension=b'transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Transplant with automated largefile commits and silenced status.

    The commit hook and status writer are pushed before and always popped
    after the wrapped command, even on failure.
    """
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # Suppress largefiles status messages during the transplant.
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result
1663
1686
1664
1687
@eh.wrapcommand(b'cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """'hg cat' that can print largefile contents.

    A pattern naming a largefile is matched via its standin; the actual
    content is fetched from the store/user cache when necessary.
    Returns 0 if at least one file was printed, 1 otherwise.
    """
    opts = pycompat.byteskwargs(opts)
    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn

    def lfmatchfn(f):
        # Accept either the plain name or, for a standin, the largefile name.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)

    m.matchfn = lfmatchfn
    origbadfn = m.bad

    def lfbadfn(f, msg):
        # Names matched through their standin are not actually bad.
        if f not in notbad:
            origbadfn(f, msg)

    m.bad = lfbadfn

    origvisitdirfn = m.visitdir

    def lfvisitdirfn(d):
        # Always descend into the standin directory, and also try the
        # directory name with the standin prefix stripped.
        if d == lfutil.shortname:
            return True
        ret = origvisitdirfn(d)
        if ret:
            return ret
        lf = lfutil.splitstandin(d)
        if lf is None:
            return False
        return origvisitdirfn(lf)

    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get(b'decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                lfhash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, lfhash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, lfhash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _(
                                b'largefile %s is not in cache and could not be '
                                b'downloaded'
                            )
                            % lf
                        )
                path = lfutil.usercachepath(repo.ui, lfhash)
                with open(path, b"rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err
1735
1758
1736
1759
1737 @eh.wrapfunction(merge, b'_update')
1760 @eh.wrapfunction(merge, b'_update')
1738 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1761 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1739 matcher = kwargs.get('matcher', None)
1762 matcher = kwargs.get('matcher', None)
1740 # note if this is a partial update
1763 # note if this is a partial update
1741 partial = matcher and not matcher.always()
1764 partial = matcher and not matcher.always()
1742 with repo.wlock():
1765 with repo.wlock():
1743 # branch | | |
1766 # branch | | |
1744 # merge | force | partial | action
1767 # merge | force | partial | action
1745 # -------+-------+---------+--------------
1768 # -------+-------+---------+--------------
1746 # x | x | x | linear-merge
1769 # x | x | x | linear-merge
1747 # o | x | x | branch-merge
1770 # o | x | x | branch-merge
1748 # x | o | x | overwrite (as clean update)
1771 # x | o | x | overwrite (as clean update)
1749 # o | o | x | force-branch-merge (*1)
1772 # o | o | x | force-branch-merge (*1)
1750 # x | x | o | (*)
1773 # x | x | o | (*)
1751 # o | x | o | (*)
1774 # o | x | o | (*)
1752 # x | o | o | overwrite (as revert)
1775 # x | o | o | overwrite (as revert)
1753 # o | o | o | (*)
1776 # o | o | o | (*)
1754 #
1777 #
1755 # (*) don't care
1778 # (*) don't care
1756 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1779 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1757
1780
1758 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1781 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1759 unsure, s, mtime_boundary = lfdirstate.status(
1782 unsure, s, mtime_boundary = lfdirstate.status(
1760 matchmod.always(),
1783 matchmod.always(),
1761 subrepos=[],
1784 subrepos=[],
1762 ignored=False,
1785 ignored=False,
1763 clean=True,
1786 clean=True,
1764 unknown=False,
1787 unknown=False,
1765 )
1788 )
1766 oldclean = set(s.clean)
1789 oldclean = set(s.clean)
1767 pctx = repo[b'.']
1790 pctx = repo[b'.']
1768 dctx = repo[node]
1791 dctx = repo[node]
1769 for lfile in unsure + s.modified:
1792 for lfile in unsure + s.modified:
1770 lfileabs = repo.wvfs.join(lfile)
1793 lfileabs = repo.wvfs.join(lfile)
1771 if not repo.wvfs.exists(lfileabs):
1794 if not repo.wvfs.exists(lfileabs):
1772 continue
1795 continue
1773 lfhash = lfutil.hashfile(lfileabs)
1796 lfhash = lfutil.hashfile(lfileabs)
1774 standin = lfutil.standin(lfile)
1797 standin = lfutil.standin(lfile)
1775 lfutil.writestandin(
1798 lfutil.writestandin(
1776 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1799 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1777 )
1800 )
1778 if standin in pctx and lfhash == lfutil.readasstandin(
1801 if standin in pctx and lfhash == lfutil.readasstandin(
1779 pctx[standin]
1802 pctx[standin]
1780 ):
1803 ):
1781 oldclean.add(lfile)
1804 oldclean.add(lfile)
1782 for lfile in s.added:
1805 for lfile in s.added:
1783 fstandin = lfutil.standin(lfile)
1806 fstandin = lfutil.standin(lfile)
1784 if fstandin not in dctx:
1807 if fstandin not in dctx:
1785 # in this case, content of standin file is meaningless
1808 # in this case, content of standin file is meaningless
1786 # (in dctx, lfile is unknown, or normal file)
1809 # (in dctx, lfile is unknown, or normal file)
1787 continue
1810 continue
1788 lfutil.updatestandin(repo, lfile, fstandin)
1811 lfutil.updatestandin(repo, lfile, fstandin)
1789 # mark all clean largefiles as dirty, just in case the update gets
1812 # mark all clean largefiles as dirty, just in case the update gets
1790 # interrupted before largefiles and lfdirstate are synchronized
1813 # interrupted before largefiles and lfdirstate are synchronized
1791 for lfile in oldclean:
1814 for lfile in oldclean:
1792 lfdirstate.set_possibly_dirty(lfile)
1815 lfdirstate.set_possibly_dirty(lfile)
1793 lfdirstate.write(repo.currenttransaction())
1816 lfdirstate.write(repo.currenttransaction())
1794
1817
1795 oldstandins = lfutil.getstandinsstate(repo)
1818 oldstandins = lfutil.getstandinsstate(repo)
1796 wc = kwargs.get('wc')
1819 wc = kwargs.get('wc')
1797 if wc and wc.isinmemory():
1820 if wc and wc.isinmemory():
1798 # largefiles is not a good candidate for in-memory merge (large
1821 # largefiles is not a good candidate for in-memory merge (large
1799 # files, custom dirstate, matcher usage).
1822 # files, custom dirstate, matcher usage).
1800 raise error.ProgrammingError(
1823 raise error.ProgrammingError(
1801 b'largefiles is not compatible with in-memory merge'
1824 b'largefiles is not compatible with in-memory merge'
1802 )
1825 )
1803 with lfdirstate.changing_parents(repo):
1826 with lfdirstate.changing_parents(repo):
1804 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1827 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1805
1828
1806 newstandins = lfutil.getstandinsstate(repo)
1829 newstandins = lfutil.getstandinsstate(repo)
1807 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1830 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1808
1831
1809 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1832 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1810 # all the ones that didn't change as clean
1833 # all the ones that didn't change as clean
1811 for lfile in oldclean.difference(filelist):
1834 for lfile in oldclean.difference(filelist):
1812 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1835 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1813 lfdirstate.write(repo.currenttransaction())
1836 lfdirstate.write(repo.currenttransaction())
1814
1837
1815 if branchmerge or force or partial:
1838 if branchmerge or force or partial:
1816 filelist.extend(s.deleted + s.removed)
1839 filelist.extend(s.deleted + s.removed)
1817
1840
1818 lfcommands.updatelfiles(
1841 lfcommands.updatelfiles(
1819 repo.ui, repo, filelist=filelist, normallookup=partial
1842 repo.ui, repo, filelist=filelist, normallookup=partial
1820 )
1843 )
1821
1844
1822 return result
1845 return result
1823
1846
1824
1847
@eh.wrapfunction(scmutil, b'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """Wrap scmutil.marktouched so touched standins refresh their largefiles.

    After the wrapped call runs, any touched path that is actually a
    standin gets its corresponding largefile brought up to date in the
    working directory.
    """
    result = orig(repo, files, *args, **kwargs)

    # splitstandin() returns None for non-standin paths; keep only the
    # largefile names behind actual standins.
    touched_lfiles = [
        lf for lf in map(lfutil.splitstandin, files) if lf is not None
    ]
    if touched_lfiles:
        lfcommands.updatelfiles(
            repo.ui,
            repo,
            filelist=touched_lfiles,
            printmessage=False,
            normallookup=True,
        )

    return result
1844
1867
1845
1868
@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
def upgraderequirements(orig, repo):
    """Carry the b'largefiles' requirement through a repository upgrade.

    Wraps both preservedrequirements() and supporteddestrequirements()
    so that a repository already using largefiles keeps that requirement
    in the upgraded repository.
    """
    requirement_set = orig(repo)
    uses_largefiles = b'largefiles' in repo.requirements
    if uses_largefiles:
        requirement_set.add(b'largefiles')
    return requirement_set
1853
1876
1854
1877
# URL scheme used to address a largefile by hash (b'largefile://<hash>');
# recognized and handled by the openlargefile() wrapper around url.open().
_lfscheme = b'largefile://'
1856
1879
1857
1880
@eh.wrapfunction(urlmod, b'open')
def openlargefile(orig, ui, url_, data=None, **kwargs):
    """Wrap url.open() to serve b'largefile://<hash>' URLs from the store.

    URLs that do not use the largefile scheme are passed through to the
    wrapped opener unchanged.
    """
    if not url_.startswith(_lfscheme):
        # Not a largefile URL: defer to the original implementation.
        return orig(ui, url_, data=data, **kwargs)
    if data:
        msg = b"cannot use data on a 'largefile://' url"
        raise error.ProgrammingError(msg)
    # Strip the scheme prefix; the remainder is the largefile hash.
    lfid = url_[len(_lfscheme) :]
    return storefactory.getlfile(ui, lfid)
General Comments 0
You need to be logged in to leave comments. Login now