largefile: make sure we hold the lock when updating the second dirstate...
marmoute
r50859:0cf4c1b8 default
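
The change wraps the lazy population of .hg/largefiles/dirstate in the
working-copy lock, taken without waiting. A minimal illustrative sketch of
that pattern (not the literal patch; `repo` stands in for any local
repository object):

    from mercurial import error

    try:
        with repo.wlock(wait=False):
            pass  # create and populate the second dirstate under the lock
    except error.LockError:
        # another process holds the lock; the initialization is optional,
        # so skip it rather than block or race
        pass
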
@@ -1,797 +1,807 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


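# Illustrative usage (not part of this change): lfstatus() is a context
# manager that temporarily flips repo.lfstatus, so wrapped operations see
# largefile-aware status, e.g.:
#
#     with lfstatus(repo):
#         changes = repo.status()

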
def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
-        matcher = getstandinmatcher(repo)
-        standins = repo.dirstate.walk(
-            matcher, subrepos=[], unknown=False, ignored=False
-        )
-
-        if len(standins) > 0:
-            vfs.makedirs(lfstoredir)
-
-        with lfdirstate.changing_parents(repo):
-            for standin in standins:
-                lfile = splitstandin(standin)
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
+        try:
+            with repo.wlock(wait=False):
+                matcher = getstandinmatcher(repo)
+                standins = repo.dirstate.walk(
+                    matcher, subrepos=[], unknown=False, ignored=False
+                )
+
+                if len(standins) > 0:
+                    vfs.makedirs(lfstoredir)
+
+                with lfdirstate.changing_parents(repo):
+                    for standin in standins:
+                        lfile = splitstandin(standin)
+                        lfdirstate.update_file(
+                            lfile,
+                            p1_tracked=True,
+                            wc_tracked=True,
+                            possibly_dirty=True,
+                        )
+        except error.LockError:
+            # Assume that whatever was holding the lock was important.
+            # If we were doing something important, we would already have
+            # either the lock or a largefile dirstate.
+            pass
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


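# Illustrative round trip (not part of this change) for a repo-relative
# path b'sub/big.bin':
#
#     standin(b'sub/big.bin')             -> b'.hglf/sub/big.bin'
#     isstandin(b'.hglf/sub/big.bin')     -> True
#     splitstandin(b'.hglf/sub/big.bin')  -> b'sub/big.bin'
#     splitstandin(b'sub/big.bin')        -> None

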
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


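# Illustrative note (not part of this change): getexecutable() is truthy
# only when the user, group, and other execute bits are all set; a 0o755
# file qualifies, a 0o744 file does not.

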
def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


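# Illustrative behavior (not part of this change): urljoin() normalizes to
# exactly one slash at each join point, e.g.:
#
#     urljoin(b'http://host/', b'/path', b'file') -> b'http://host/path/file'

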
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
        lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup while copyalltostore() runs, but can omit the redundant check
    # for files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE
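
# Illustrative usage (not part of this change): forcibly=True yields
# ui.status (always print), forcibly=False yields a no-op, and forcibly=None
# defers to the repo's current writer when largefiles is enabled, e.g.:
#
#     writer = getstatuswriter(ui, repo, forcibly=None)
#     writer(_(b'3 largefiles updated\n'))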