largefiles: rely on main scoping for writing dirstate in `markcommitted`...
marmoute
r50945:22cd517b default
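
This revision drops the explicit `lfdirstate.write(repo.currenttransaction())` call at the end of `markcommitted` and runs the whole block inside the main dirstate's `changing_parents` scope instead: while such a scope is open, `openlfdirstate()` hands back the `_sub_dirstate` attached to the main dirstate (see `openlfdirstate` in the listing below), so the largefiles dirstate gets written as part of the outer scope's own teardown. A minimal toy sketch of that scoping pattern, with every name except `changing_parents` and `_sub_dirstate` invented for illustration:

    import contextlib


    class toydirstate:
        """Illustrative stand-in; not Mercurial's real dirstate API."""

        def __init__(self, name):
            self.name = name
            self.pending = []          # changes queued while a scope is open
            self._sub_dirstate = None  # attached sub-dirstate, if any

        @contextlib.contextmanager
        def changing_parents(self, repo):
            self._sub_dirstate = toydirstate('largefiles')
            try:
                yield
            finally:
                # "main scoping": the outer scope flushes itself *and* the
                # attached sub-dirstate, so callers need no explicit write()
                self.write()
                self._sub_dirstate.write()
                self._sub_dirstate = None

        def write(self):
            print('%s: flushed %d entries' % (self.name, len(self.pending)))


    main = toydirstate('main')
    with main.changing_parents(repo=None):
        main._sub_dirstate.pending.append('standin synced')
    # both dirstates are flushed here, when the scope exits

With that pattern in place, the hunk below only needs to move the `openlfdirstate()` call inside the scope and delete the trailing write.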
@@ -1,818 +1,817 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue
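
# Illustrative usage note (not part of upstream lfutil.py): the extension's
# wrapped status() checks `repo.lfstatus`, so largefiles-aware status is
# requested by wrapping the call:
#
#     with lfstatus(repo):
#         modified = repo.status().modified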


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create a hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on an atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
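
# Example (illustrative, not in the upstream file): with no
# largefiles.usercache configured, a POSIX user with HOME=/home/alice and
# XDG_CACHE_HOME unset gets /home/alice/.cache/largefiles as the cache.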


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from the user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    _large_file_dirstate = True

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)
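
# Example (illustrative, not in the upstream file): for a repository at
# /src/repo, storepath(repo, hash) is /src/repo/.hg/largefiles/<hash>;
# in a shared clone it points into the share source instead, with
# findstorepath() falling back to the local store for compatibility.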


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen: this is
    meant to be called only after ensuring that the needed largefile
    exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None
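
# Examples (illustrative, not in the upstream file):
#   standin(b'foo/bar.bin')        -> b'.hglf/foo/bar.bin'
#   splitstandin(b'.hglf/bar.bin') -> b'bar.bin'
#   splitstandin(b'bar.bin')       -> None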


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored in the storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
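
# Note (illustrative): hashfile() returns b'' for a missing path and
# otherwise the 40-character hex SHA-1 of the contents, the same string
# that writestandin() records in the standin file.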


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url
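
# Example (illustrative, not in the upstream file):
#   urljoin(b'http://host/', b'/repo', b'lf') -> b'http://host/repo/lf'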


def hexsha1(fileobj):
    """Return the hex-encoded SHA-1 digest of the data in the given
    file-like object."""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

-    lfdirstate = openlfdirstate(repo.ui, repo)
-    with lfdirstate.changing_parents(repo):
+    with repo.dirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

+        lfdirstate = openlfdirstate(repo.ui, repo)
        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
-        lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup in copyalltostore(), but it lets us omit the redundant check
    # for files coming from the 2nd parent, which should already exist in
    # the store after a merge.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist
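
# Example (illustrative, not in the upstream file): with
# oldstandins = [(b'a', b'hash1')] and newstandins = [(b'a', b'hash2')],
# the symmetric difference contains both pairs, so [b'a'] is returned
# (each changed largefile is listed once).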


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    Return the (possibly modified) ``match`` object to be used for the
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit after resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (rebase, transplant, and so
    on), because they should already have been updated before committing.

    But the 1st commit after resuming an automated committing session
    (e.g. ``rebase --continue``) should update them, because largefiles
    may have been modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles-specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as the "default" writer function.

    Otherwise, this returns a function that always writes out (or
    ignores, if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE