# Changeset r48456:b1b6d0ca (default branch, author: marmoute)
# largefiles: use `parentchange` markcommitted
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

# Name of the directory (relative to the repo root) holding the standin
# files that represent largefiles in normal history, and the name used
# for the largefiles store / config section.
shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------
42
42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Context manager that temporarily sets ``repo.lfstatus`` to *value*.

    The previous value (``False`` when the attribute was never set) is
    restored on exit, even if the body raises.
    """
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue
51
51
52
52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum size (in MiB) above which files are largefiles.

    ``opt`` (the command-line value) wins when set; otherwise, when
    ``assumelfiles`` is true, fall back to the ``largefiles.minsize``
    config value (or *default*).  Raises ``error.Abort`` when the value
    is missing or is not a number.
    """
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize
67
67
68
68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        # preserve the source file's permission bits on the copy
        os.chmod(dest, os.stat(src).st_mode)
80
80
81
81
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)
88
88
89
89
def _usercachedir(ui, name=longname):
    """Return the location of the "global" largefiles cache.

    Resolution order: explicit ``<name>.usercache`` config, then the
    platform's conventional per-user cache directory.  Raises
    ``error.Abort`` (with a platform-specific hint) when no location can
    be determined.
    """
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        # follow the XDG base-directory convention, with ~/.cache fallback
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138
138
139
139
def inusercache(ui, hash):
    """Return True if the largefile with this hash is in the user cache."""
    path = usercachepath(ui, hash)
    return os.path.exists(path)
143
143
144
144
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # populate the repo-local store from the user cache
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
159
159
160
160
class largefilesdirstate(dirstate.dirstate):
    """A dirstate subclass used to track largefiles.

    Every path argument is normalized with ``unixpath()`` before being
    handed to the base class, and ignore handling is disabled (``_ignore``
    always returns False).
    """

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
197
197
198
198
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When *create* is true and no largefiles dirstate file exists yet, it
    is populated from the standins currently tracked by the repo dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
234
234
235
235
def lfdirstatestatus(lfdirstate, repo):
    """Return the status of largefiles, resolving 'unsure' entries.

    Files the dirstate is unsure about are re-hashed and compared with the
    standin recorded in the first parent; clean files are marked normal in
    *lfdirstate* as a side effect.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            # standin not present in the parent: treat as modified
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
254
254
255
255
def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]
269
269
270
270
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))
274
274
275
275
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash.

    For shared repositories the store lives in the share source unless
    *forcelocal* is true.
    """
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
282
282
283
283
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)
300
300
301
301
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # hash mismatch: the cached copy is corrupt; remove the bad copy
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
325
325
326
326
def copytostore(repo, ctx, file, fstandin):
    """Copy working-copy largefile *file* into the store, keyed by the
    hash recorded in the standin *fstandin* of changectx *ctx*.

    A warning is emitted when the largefile is missing from the working
    copy (the store copy cannot be made)."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        # already stored; nothing to do
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
339
339
340
340
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in this revision's manifest
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
349
349
350
350
def copytostoreabsolute(repo, file, hash):
    """Copy the file at absolute path *file* into the store under *hash*.

    Prefers hardlinking from the user cache when the hash is already
    there; otherwise copies atomically and then links the store copy back
    into the user cache.
    """
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
363
363
364
364
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)
370
370
371
371
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate the caller's patterns into the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
389
389
390
390
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # must be a standin AND its underlying file must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher
404
404
405
405
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
417
417
418
418
def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)
423
423
424
424
def splitstandin(filename):
    """Return the largefile path for a standin path, or None if *filename*
    is not a standin."""
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None
434
434
435
435
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.

    Raises ``error.Abort`` when the largefile is missing from the
    working copy.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)
448
448
449
449
def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()
455
455
456
456
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # flags: b'x' marks the standin executable when the largefile is
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
460
460
461
461
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())
470
470
471
471
def hashfile(file):
    """Return the hex SHA-1 of the file at path *file*, or b'' when the
    file does not exist."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
477
477
478
478
def getexecutable(filename):
    """Return a truthy value iff *filename* is executable by user, group
    AND other (all three x bits set)."""
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )
486
486
487
487
def urljoin(first, second, *arg):
    """Join two or more URL components with exactly one b'/' between each."""

    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url
500
500
501
501
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())
509
509
510
510
def httpsendfile(ui, filename):
    """Return an httpsendfile wrapper for uploading *filename* (binary)."""
    return httpconnection.httpsendfile(ui, filename, b'rb')
513
513
514
514
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))
518
518
519
519
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    # fallback: any entry in the largefiles dirstate means it's in use
    return any(openlfdirstate(repo.ui, repo, False))
528
528
529
529
class storeprotonotcapable(Exception):
    """Raised when no remote store supports any of the requested store types."""

    def __init__(self, storetypes):
        # the store type names that could not be satisfied
        self.storetypes = storetypes
533
533
534
534
def getstandinsstate(repo):
    """Return ``(lfile, hash)`` pairs for every standin tracked in dirstate.

    ``hash`` is ``None`` when the standin file cannot be read from the
    working context (IOError).
    """
    wctx = repo[None]
    matcher = getstandinmatcher(repo)
    walked = repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    )
    standins = []
    for fstandin in walked:
        lfile = splitstandin(fstandin)
        try:
            hash = readasstandin(wctx[fstandin])
        except IOError:
            # standin missing/unreadable in the working copy
            hash = None
        standins.append((lfile, hash))
    return standins
549
549
550
550
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Propagate the dirstate status of ``lfile``'s standin into lfdirstate.

    Reads the state character of the standin from ``repo.dirstate`` and
    calls the matching lfdirstate transition so the two dirstates agree.
    When ``normallookup`` is true, a clean ('n') entry is still recorded
    via ``normallookup`` so the largefile is re-examined on the next
    status run.
    """
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
    else:
        # standin is not tracked at all
        state, mtime = b'?', -1
    if state == b'n':
        if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == b'm':
        # merged standin -> re-check the largefile on next status
        lfdirstate.normallookup(lfile)
    elif state == b'r':
        lfdirstate.remove(lfile)
    elif state == b'a':
        lfdirstate.add(lfile)
    elif state == b'?':
        lfdirstate.drop(lfile)
572
572
573
573
def markcommitted(orig, ctx, node):
    """Wrapper for ``committablectx.markcommitted`` in largefile repos.

    Runs the original ``markcommitted`` inside a ``parentchange`` context
    of the largefiles dirstate, syncs every standin touched by the commit
    into that dirstate, then copies the committed largefiles into the
    local store cache.
    """
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
        lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
603
603
604
604
def getlfilestoupdate(oldstandins, newstandins):
    """Return names of largefiles whose standin entries changed.

    ``oldstandins`` and ``newstandins`` are lists of ``(lfile, hash)``
    pairs as produced by ``getstandinsstate()``.  Any pair present in
    only one of the two lists marks its largefile as needing an update.
    Each filename appears at most once in the result; ordering follows
    set iteration order, as before.
    """
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track already-emitted names in a set: O(1) membership instead of
    # the former O(n) scan of ``filelist`` per entry
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
612
612
613
613
def getlfilestoupload(repo, missing, addfunc):
    """Invoke ``addfunc(standin, hash)`` for each standin touched by
    the revisions in ``missing``.

    Progress is reported per revision.  For merge commits, files taken
    unchanged from one parent are not listed by ``ctx.files()``, so the
    merge manifest is compared against both parent manifests to recover
    every potentially-affected file.
    """
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            # look the context up with largefiles status disabled so we
            # see the raw standin files, not the substituted largefiles
            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                # merge: reconstruct the full set of changed files from
                # the three manifests (see docstring)
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
645
645
646
646
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    # expand directory patterns to the largefiles they contain
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # plain largefile names are excluded; their standins carry the
        # commit instead
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
751
751
752
752
class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        # True until the first commit after resuming has run
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # first commit after resuming: refresh standins once, then
        # behave as a no-op for all subsequent commits
        self.resuming = False
        return updatestandinsbymatch(repo, match)
774
774
775
775
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        # default: whichever writer is currently on top of the stack
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now