##// END OF EJS Templates
large-files: make sure we write newly initialized standin file early...
marmoute -
r51011:9b49809e default
parent child Browse files
Show More
@@ -1,817 +1,819 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from mercurial.pycompat import open
18 from mercurial.pycompat import open
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 pycompat,
26 pycompat,
27 requirements,
27 requirements,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33 from mercurial.utils import hashutil
33 from mercurial.utils import hashutil
34 from mercurial.dirstateutils import timestamp
34 from mercurial.dirstateutils import timestamp
35
35
# Standin files for largefiles live under this directory in the working copy.
shortname = b'.hglf'
shortnameslash = shortname + b'/'
# Name of the store/cache directory and of the config section.
longname = b'largefiles'
39
39
40 # -- Private worker functions ------------------------------------------
40 # -- Private worker functions ------------------------------------------
41
41
42
42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to ``value``.

    The previous value (``False`` when the attribute was never set) is
    restored on exit, even when the managed block raises.
    """
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = previous
51
51
52
52
def getminsize(ui, assumelfiles, opt, default=10):
    """Resolve the minimum size threshold for largefiles.

    The explicit ``opt`` value wins; otherwise, when ``assumelfiles`` is
    set, fall back to the ``largefiles.minsize`` config value (or
    ``default``).  Raises ``error.Abort`` when the value is not numeric
    or no usable size was found.
    """
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize
67
67
68
68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlinking failed (e.g. cross-device or unsupported filesystem):
        # fall back to an atomic copy and propagate the source's mode bits.
        with open(src, b'rb') as fsrc, util.atomictempfile(dest) as fdst:
            for chunk in util.filechunkiter(fsrc):
                fdst.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
80
80
81
81
def usercachepath(ui, hash):
    """Return the location of ``hash`` in the "global" largefiles cache.

    This cache is shared across repositories - both to preserve download
    bandwidth and storage space.
    """
    return os.path.join(_usercachedir(ui), hash)
88
88
89
89
def _usercachedir(ui, name=longname):
    """Return the location of the "global" largefiles cache.

    An explicit ``<name>.usercache`` config setting always wins; otherwise
    a platform-conventional cache directory is derived from the
    environment.  Aborts with a platform-specific hint when nothing can
    be determined.
    """
    configured = ui.configpath(name, b'usercache')
    if configured:
        return configured

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        xdg_cache = encoding.environ.get(b'XDG_CACHE_HOME')
        if xdg_cache:
            return os.path.join(xdg_cache, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138
138
139
139
def inusercache(ui, hash):
    """True when the user-wide cache already holds a file for ``hash``."""
    return os.path.exists(usercachepath(ui, hash))
143
143
144
144
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.

    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.
    """
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
159
159
160
160
class largefilesdirstate(dirstate.dirstate):
    """Dirstate subclass tracking largefiles under ``.hg/largefiles``.

    Every path-taking method normalizes its argument with ``unixpath``
    before delegating to the base class, and ignore rules are disabled.
    """

    # marker so other code can distinguish this from the main dirstate
    _large_file_dirstate = True

    def __getitem__(self, key):
        return super().__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super().set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super().set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super().normal(unixpath(f))

    def remove(self, f):
        return super().remove(unixpath(f))

    def add(self, f):
        return super().add(unixpath(f))

    def drop(self, f):
        return super().drop(unixpath(f))

    def forget(self, f):
        return super().forget(unixpath(f))

    def normallookup(self, f):
        return super().normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super().write(None)
203
203
204
204
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if standins:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    # register each largefile as tracked but possibly dirty;
                    # NOTE(review): relies on hacky_extension_update_file
                    # semantics - confirm against dirstate implementation.
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
                # avoid getting dirty dirstate before other operations
                lfdirstate.write(repo.currenttransaction())
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate
258
260
259
261
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' entries of the largefiles dirstate status.

    Each unsure file is re-hashed and compared against the hash recorded
    in its standin at the working copy parent; it is then classified as
    modified or clean, and clean entries have their cached stat data
    refreshed.  Returns the (updated) status object.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            # only record a cache entry when the mtime is reliable
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (st.st_mode, st.st_size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s
285
287
286
288
def listlfiles(repo, rev=None, matcher=None):
    """Return a list of largefiles in the working copy or the
    specified changeset."""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(standin_path)
        for standin_path in repo[rev].walk(matcher)
        if rev is not None
        or repo.dirstate.get_entry(standin_path).any_tracked
    ]
300
302
301
303
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))
305
307
306
308
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for
    a file with the given hash.

    For shared repositories the store lives in the share source, unless
    ``forcelocal`` requests the local store.
    """
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
313
315
314
316
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    primary = storepath(repo, hash, False)

    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)

    return (primary, False)
331
333
332
334
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).
    """
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as fsrc, wvfs(filename, b'wb') as fdst:
        gothash = copyandhash(util.filechunkiter(fsrc), fdst)
    if gothash != hash:
        # the copy is corrupt: report it and remove the bad working copy file
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
356
358
357
359
def copytostore(repo, ctx, file, fstandin):
    """Copy one largefile (named by its standin ``fstandin`` in ``ctx``)
    from the working copy into the local store, if not already present."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
        return
    repo.ui.warn(
        _(b"%s: largefile %s not available from local store\n")
        % (file, hash)
    )
370
372
371
373
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in the manifest are copied
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
380
382
381
383
def copytostoreabsolute(repo, file, hash):
    """Copy the file at absolute path ``file`` into the store as ``hash``,
    hardlinking from the user cache when possible, then make sure the
    user cache has a link too."""
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        # atomic copy so a partial write never lands in the store
        with open(file, b'rb') as fsrc, util.atomictempfile(
            storepath(repo, hash), createmode=repo.store.createmode
        ) as fdst:
            for chunk in util.filechunkiter(fsrc):
                fdst.write(chunk)
        linktousercache(repo, hash)
394
396
395
397
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
401
403
402
404
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # restrict to the standin counterparts of rmatcher's patterns
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
    else:
        # no patterns: match the whole standin directory
        pats = [wvfs.join(standindir)]
    return scmutil.match(repo[None], pats, badfn=badfn)
420
422
421
423
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher.  Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # a path matches when it is a standin AND its largefile matches
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher
435
437
436
438
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
448
450
449
451
def isstandin(filename):
    """Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)
454
456
455
457
def splitstandin(filename):
    """Return the largefile path for a standin path, or None otherwise."""
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    prefix, sep, tail = util.pconvert(filename).partition(b'/')
    if sep and prefix == shortname:
        return tail
    return None
465
467
466
468
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    file = repo.wjoin(lfile)
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
479
481
480
482
def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()
486
488
487
489
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = b'x' if executable else b''
    repo.wwrite(standin, hash + b'\n', flags)
491
493
492
494
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way.  Return the hash."""
    digest = hashutil.sha1(b'')
    for chunk in instream:
        digest.update(chunk)
        outfile.write(chunk)
    return hex(digest.digest())
501
503
502
504
def hashfile(file):
    """Return the hex SHA-1 of the file's contents, or b'' if it is
    missing."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fh:
        return hexsha1(fh)
508
510
509
511
def getexecutable(filename):
    """Truthy when the file is executable by user, group AND other."""
    mode = os.stat(filename).st_mode
    return mode & stat.S_IXUSR and mode & stat.S_IXGRP and mode & stat.S_IXOTH
517
519
518
520
def urljoin(first, second, *arg):
    """Join two or more URL components, guaranteeing exactly one b'/'
    between adjacent components (duplicate slashes at a seam collapse)."""

    def _seam(left, right):
        # one-line purpose: glue two components with a single slash
        if not left.endswith(b'/'):
            left += b'/'
        return left + (right[1:] if right.startswith(b'/') else right)

    result = _seam(first, second)
    for piece in arg:
        result = _seam(result, piece)
    return result
531
533
532
534
def hexsha1(fileobj):
    """Return the hex-encoded SHA-1 digest of all data readable from the
    file-like object *fileobj* (read in chunks, not all at once)."""
    digest = hashutil.sha1()
    for piece in util.filechunkiter(fileobj):
        digest.update(piece)
    return hex(digest.digest())
540
542
541
543
def httpsendfile(ui, filename):
    """Return an httpsendfile wrapper opened on *filename* in binary mode."""
    mode = b'rb'
    return httpconnection.httpsendfile(ui, filename, mode)
544
546
545
547
def unixpath(path):
    '''Return *path* normalized (and slash-converted) for the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
549
551
550
552
def islfilesrepo(repo):
    '''Return True if *repo* is a largefiles repo.

    A repo qualifies when the "largefiles" requirement is set and some
    store datafile lives under the standin directory, or when its
    largefiles dirstate has any entry at all.
    '''
    if b'largefiles' in repo.requirements:
        for entry in repo.store.datafiles():
            if shortnameslash in entry[1]:
                return True

    return any(openlfdirstate(repo.ui, repo, False))
559
561
560
562
class storeprotonotcapable(Exception):
    """Exception carrying the store types that could not be satisfied.

    NOTE(review): presumably raised when no available store protocol
    matches any of the requested ``storetypes`` -- confirm with callers.
    """

    def __init__(self, storetypes):
        self.storetypes = storetypes
564
566
565
567
def getstandinsstate(repo):
    """Return a list of (lfile, hexhash) pairs, one per standin tracked
    in the dirstate; hexhash is None when the standin cannot be read."""
    result = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    walked = repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    )
    for standinfile in walked:
        lfile = splitstandin(standinfile)
        try:
            hexhash = readasstandin(wctx[standinfile])
        except IOError:
            # unreadable standin: record the name with no hash
            hexhash = None
        result.append((lfile, hexhash))
    return result
580
582
581
583
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Mirror the dirstate status of *lfile*'s standin into *lfdirstate*.

    ``normallookup`` is accepted for interface compatibility but is not
    consulted by this implementation.
    """
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        entry = repo.dirstate.get_entry(lfstandin)
        update_kwargs = {
            'wc_tracked': entry.tracked,
            'p1_tracked': entry.p1_tracked,
            'p2_info': entry.p2_info,
            'possibly_dirty': True,
        }
    else:
        # standin unknown to the main dirstate: drop it from lfdirstate
        update_kwargs = {'p1_tracked': False, 'wc_tracked': False}
    lfdirstate.hacky_extension_update_file(lfile, **update_kwargs)
599
601
600
602
def markcommitted(orig, ctx, node):
    """Wrapper around the committed-marking hook: run ``orig``, then sync
    the largefiles dirstate for every committed standin and copy the
    committed largefiles into the local store cache.
    """
    repo = ctx.repo()

    with repo.dirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        lfdirstate = openlfdirstate(repo.ui, repo)
        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
629
631
630
632
def getlfilestoupdate(oldstandins, newstandins):
    """Return the largefile names whose (name, hash) standin entry
    differs between *oldstandins* and *newstandins*.

    Each argument is a list of (lfile, hash) pairs.  Each changed name
    appears once in the result, in the order encountered.
    """
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    seen = set()  # O(1) membership test; scanning filelist was O(n) per item
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
638
640
639
641
def getlfilestoupload(repo, missing, addfunc):
    """Call ``addfunc(standin, hex_hash)`` for every largefile standin
    touched by the revisions in *missing*.

    ``missing`` is a sized iterable of changeset nodes (its length drives
    the progress bar).  For merge revisions, files present in only one
    parent, or whose manifest entry differs from either parent, are also
    inspected -- ctx.files() alone may omit 2nd-parent changes.
    """
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            # ignore null parents
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            # read the ctx with largefiles status handling disabled
            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                # merge: widen the candidate set beyond ctx.files()
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                # only standins that actually exist in this revision
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
671
673
672
674
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    # narrow the requested files to the largefile-aware set
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # accept regular files matched originally (excluding largefiles)
        # plus the standins gathered above
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
777
779
778
780
class automatedcommithook:
    """Stateful hook that refreshes standins only at the first commit
    after resuming an automated command.

    Automated committing (rebase, transplant and so on) normally keeps
    standins up to date before committing, so refreshing them on every
    commit would be wasted work.  But the first commit of a resumed run
    (e.g. ``rebase --continue``) must refresh them, because largefiles
    may have been modified manually in the meantime.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # only the very first invocation after resuming updates standins
        self.resuming = False
        return updatestandinsbymatch(repo, match)
800
802
801
803
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, the last element of
    ``repo._lfstatuswriters`` is returned as the "default" writer
    (when the repo has largefiles enabled).

    Otherwise the returned function always writes out (``forcibly``
    truthy) or always ignores (``forcibly`` falsy) status messages.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now