largefile: use sysstr to check for attribute presence in `getstatuswriter`...

Author: marmoute
Changeset: r51782:3934d85c (branch: default)
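The change below swaps a `bytes` attribute name for a native `str` ("sysstr") in the `util.safehasattr()` call inside `getstatuswriter()`. In Python 3, attribute names are native strings; probing an object with a `bytes` name only ever worked through Mercurial's compatibility wrappers, and the builtins reject it outright. A minimal self-contained sketch of that builtin behavior (the `Repo` class here is hypothetical, not Mercurial's):

    class Repo:
        _largefilesenabled = True

    # A native-str name works with the builtins.
    print(getattr(Repo, '_largefilesenabled', False))  # True

    # A bytes name is rejected even when a default is supplied.
    try:
        getattr(Repo, b'_largefilesenabled', False)
    except TypeError as exc:
        print(exc)  # attribute name must be string, not 'bytes'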
@@ -1,823 +1,823 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue

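# Illustration (added for this review; not part of the original module):
# `lfstatus` is the classic save/set/restore idiom -- the old flag is read
# with a default, the new value is set, and the `finally` clause restores
# the old value even if the managed block raises.
def _example_lfstatus_restores_flag():
    class _FakeRepo:
        pass

    repo = _FakeRepo()
    with lfstatus(repo):
        assert repo.lfstatus is True  # set for the duration of the block
    assert repo.lfstatus is False  # restored to the getattr() default
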

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)

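# Illustration (not part of the module): the same hardlink-with-copy
# fallback reduced to the standard library; the real `link()` above
# additionally copies in chunks through an atomic temp file.
def _example_link_fallback(src, dest):
    import shutil

    if os.path.dirname(dest):
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    try:
        os.link(src, dest)  # hardlink when the filesystem allows it
    except OSError:
        shutil.copyfile(src, dest)  # otherwise fall back to a plain copy
        shutil.copymode(src, dest)  # and preserve the permission bits
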
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)

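# Illustration (not part of the module): the POSIX branch of the lookup
# above, reduced to the standard library -- honor XDG_CACHE_HOME first,
# then fall back to ~/.cache.
def _example_posix_usercachedir(name='largefiles'):
    path = os.environ.get('XDG_CACHE_HOME')
    if path:
        return os.path.join(path, name)
    home = os.environ.get('HOME')
    if home:
        return os.path.join(home, '.cache', name)
    raise RuntimeError('define XDG_CACHE_HOME or HOME')
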
def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    _large_file_dirstate = True
    _tr_key_suffix = b'-large-files'

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False), lfdirstate.changing_files(repo):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

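# Illustration (not part of the module): `standin` and `splitstandin` are
# inverse transforms rooted at b'.hglf/'; `util.pconvert` only normalizes
# backslashes to slashes, so on slash-separated input this is pure string
# prefixing and splitting.
def _example_standin_roundtrip():
    assert standin(b'data/big.bin') == b'.hglf/data/big.bin'
    assert splitstandin(b'.hglf/data/big.bin') == b'data/big.bin'
    assert splitstandin(b'regular.txt') is None  # not a standin path
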
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')

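# Illustration (not part of the module): a standin is a small tracked file
# whose entire content is the 40-character hex SHA-1 of the real largefile
# plus a trailing newline; `readasstandin` simply strips that newline back
# off.
def _example_standin_format():
    hex_hash = b'aa' * 20  # placeholder 40-char hex digest
    standin_data = hex_hash + b'\n'  # what writestandin() stores
    assert standin_data.strip() == hex_hash  # what readasstandin() returns
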
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())

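# Illustration (not part of the module): the same stream-while-hashing
# shape, using only the standard library.
def _example_copyandhash():
    import hashlib
    import io

    chunks = [b'hello ', b'world']
    out = io.BytesIO()
    hasher = hashlib.sha1()
    for data in chunks:
        hasher.update(data)  # hash each chunk as it streams through
        out.write(data)
    assert out.getvalue() == b'hello world'
    assert hasher.hexdigest() == hashlib.sha1(b'hello world').hexdigest()
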
def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )

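# Illustration (not part of the module): `getexecutable` treats a file as
# executable only when the user, group, and other execute bits are all set.
def _example_executable_bits():
    def allexec(mode):
        return bool(
            (mode & stat.S_IXUSR)
            and (mode & stat.S_IXGRP)
            and (mode & stat.S_IXOTH)
        )

    assert allexec(0o755)
    assert not allexec(0o644)
    assert not allexec(0o744)  # u+x alone is not enough
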
def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

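# Illustration (not part of the module): `urljoin` guarantees exactly one
# slash at each seam, however the pieces are delimited.
def _example_urljoin():
    assert urljoin(b'http://host', b'a') == b'http://host/a'
    assert urljoin(b'http://host/', b'/a/', b'/b') == b'http://host/a/b'
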
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the given
    file-like object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements:
        for entry in repo.store.data_entries():
            if entry.is_revlog and shortnameslash in entry.target_id:
                return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    with repo.dirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        lfdirstate = openlfdirstate(repo.ui, repo)
        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

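# Illustration (not part of the module): the symmetric difference of the
# (lfile, hash) pair sets picks out files that were added, dropped, or
# whose standin hash changed between the two snapshots.
def _example_lfilestoupdate():
    old = [(b'a', b'h1'), (b'b', b'h2')]
    new = [(b'a', b'h1'), (b'b', b'h3'), (b'c', b'h4')]
    assert sorted(getlfilestoupdate(old, new)) == [b'b', b'c']
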
def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        dirtymatch = matchmod.always()
        with repo.dirstate.running_status(repo):
            lfdirstate = openlfdirstate(ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                dirtymatch,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=False,
            )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
-    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
+    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE