largefiles: use `hacky_extension_update_file` in `synclfdirstate`...
marmoute - r50911:c694db2d default
@@ -1,807 +1,811 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue
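
Illustration (not part of the diff): a minimal usage sketch of the `lfstatus` context manager above, with a simple namespace object standing in for a real repository; the manager sets `repo.lfstatus` for the duration of the block and restores the previous value on exit, even if the block raises.

    import types

    repo = types.SimpleNamespace()     # stand-in for a localrepository object
    with lfstatus(repo):               # repo.lfstatus is True inside the block
        assert repo.lfstatus is True
    assert repo.lfstatus is False      # restored to the prior default on exit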


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
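
Illustration (not part of the diff): the hardlink-with-copy-fallback pattern used by `link()` above, sketched with only the standard library; Mercurial's `util.atomictempfile` additionally makes the fallback copy atomic, which plain `shutil.copy2` does not.

    import os
    import shutil

    def link_or_copy(src, dest):
        # assumes dest includes a directory component
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        try:
            os.link(src, dest)       # hardlink when OS and filesystem allow it
        except OSError:
            shutil.copy2(src, dest)  # fall back to a plain (non-atomic) copy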


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                with lfdirstate.changing_parents(repo):
                    for standin in standins:
                        lfile = splitstandin(standin)
                        lfdirstate.hacky_extension_update_file(
                            lfile,
                            p1_tracked=True,
                            wc_tracked=True,
                            possibly_dirty=True,
                        )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None
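
Illustration (not part of the diff): the standin naming convention implemented by `standin`, `isstandin`, and `splitstandin` above, as a simplified self-contained sketch (no Windows path conversion):

    SHORTNAME = b'.hglf'

    def standin_path(name):               # simplified standin()
        return SHORTNAME + b'/' + name

    def split_standin(path):              # simplified splitstandin()
        head, sep, tail = path.partition(b'/')
        return tail if sep and head == SHORTNAME else None

    assert standin_path(b'data/big.bin') == b'.hglf/data/big.bin'
    assert split_standin(b'.hglf/data/big.bin') == b'data/big.bin'
    assert split_standin(b'data/big.bin') is None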


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored in the storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())
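
Illustration (not part of the diff): the streaming copy-and-hash pattern of `copyandhash` above, reproduced with only the standard library:

    import hashlib
    import io

    def copy_and_hash(instream, outfile):
        hasher = hashlib.sha1()
        for chunk in instream:       # hash and write each chunk exactly once
            hasher.update(chunk)
            outfile.write(chunk)
        return hasher.hexdigest()

    out = io.BytesIO()
    digest = copy_and_hash(iter([b'hello ', b'world']), out)
    assert out.getvalue() == b'hello world'
    assert digest == hashlib.sha1(b'hello world').hexdigest()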


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )
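
Illustration (not part of the diff): `getexecutable` above is truthy only when the user, group, and other execute bits are all set:

    import stat

    def all_exec_bits(mode):
        return bool(
            mode & stat.S_IXUSR and mode & stat.S_IXGRP and mode & stat.S_IXOTH
        )

    assert all_exec_bits(0o755)         # rwxr-xr-x
    assert not all_exec_bits(0o744)     # rwxr--r--: group/other x bits missing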


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url
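
Illustration (not part of the diff): `urljoin` above normalizes exactly one slash at each boundary, e.g.:

    assert urljoin(b'http://host/store', b'/a', b'b') == b'http://host/store/a/b'
    assert urljoin(b'http://host', b'path') == b'http://host/path'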


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the given
    file-like object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
-        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
+        lfdirstate.hacky_extension_update_file(
+            lfile,
+            p1_tracked=False,
+            wc_tracked=False,
+        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
-        lfdirstate.update_file(
+        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup during copyalltostore(), but it lets us omit the redundant
    # check for files coming from the 2nd parent, which should already
    # exist in the store when merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist
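
Illustration (not part of the diff): `getlfilestoupdate` above returns the names of largefiles whose (name, hash) pair appears on only one side, i.e. files added, removed, or changed between the two standin states:

    old = [(b'a.bin', b'hash1'), (b'b.bin', b'hash2')]
    new = [(b'a.bin', b'hash1'), (b'b.bin', b'hash3'), (b'c.bin', b'hash4')]
    # b.bin changed content and c.bin is new; a.bin is identical on both sides
    assert sorted(getlfilestoupdate(old, new)) == [b'b.bin', b'c.bin']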


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to the specified
    match

    This returns a (possibly modified) ``match`` object to be used for
    the subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles-specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as the "default" writer function.

    Otherwise, this returns a function that always writes out (or
    ignores, if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE