largefile: use `set_clean` instead of `normal` in `lfdirstatestatus`...
marmoute
r48514:9a7d723a default
@@ -1,791 +1,791 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


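# Illustrative usage of the lfstatus() context manager (not part of the
# original module): the largefiles status machinery elsewhere in the
# extension checks repo.lfstatus, so wrapping a status call toggles
# largefiles-aware behavior and restores the previous flag on exit.
# `repo` is assumed to be any localrepository object.
#
#     with lfstatus(repo):
#         st = repo.status()  # largefiles handling enabled in this block
#
#     with lfstatus(repo, value=False):
#         st = repo.status()  # largefiles handling explicitly disabled
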
def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


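# For reference (illustrative, not part of the original module): the probing
# above is skipped entirely when the location is pinned in the config, e.g.
# with an hgrc entry like the following (the path is an arbitrary example):
#
#     [largefiles]
#     usercache = /srv/cache/largefiles
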
def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)


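# Note on the wrapper methods above (comment added for illustration): every
# path is funneled through unixpath() before it reaches the base dirstate,
# so callers may pass OS-native paths. A hypothetical example on Windows:
#
#     lfdirstate.set_tracked(b'sub\\big.bin')
#     # recorded as b'sub/big.bin' in .hg/largefiles/dirstate
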
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
-            lfdirstate.normal(lfile)
+            lfdirstate.set_clean(lfile)
    return s


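# The -/+ pair above is the only change in this revision: files whose
# standin hash still matches the working copy are now recorded via
# set_clean() instead of the legacy normal() method. Both mark the entry
# as clean in the largefiles dirstate; set_clean() is part of the newer,
# more explicit dirstate API that this series of changesets appears to be
# migrating callers to (note how the normal() wrapper above has to punt on
# the `parentfiledata` argument it would otherwise forward).
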
def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


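# Worked examples for the standin helpers above (illustrative; derivable
# directly from the code):
#
#     standin(b'sub/big.bin')             == b'.hglf/sub/big.bin'
#     isstandin(b'.hglf/sub/big.bin')     is True
#     splitstandin(b'.hglf/sub/big.bin')  == b'sub/big.bin'
#     splitstandin(b'sub/big.bin')        is None
#
# On Windows, where util.pconvert() rewrites backslashes,
# standin(b'sub\\big.bin') also yields b'.hglf/sub/big.bin'.
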
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


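# Example behavior of urljoin() (illustrative): only the joining edges are
# normalized, so exactly one slash ends up between each pair of components:
#
#     urljoin(b'http://host/base', b'/cache', b'abc')
#     # -> b'http://host/base/cache/abc'
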
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the given
    file-like object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


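# Quick sanity check (illustrative): any file-like object with a read()
# method works, since util.filechunkiter only calls read(chunksize).
#
#     import io
#     hexsha1(io.BytesIO(b''))
#     # -> b'da39a3ee5e6b4b0d3255bfef95601890afd80709', the SHA-1 of
#     #    empty input
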
def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.drop(lfile)
    else:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
        if state == b'n':
            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
                # state 'n' doesn't ensure 'clean' in this case
                lfdirstate.normallookup(lfile)
            else:
                lfdirstate.normal(lfile)
        elif state == b'm':
            lfdirstate.normallookup(lfile)
        elif state == b'r':
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
        elif state == b'a':
            lfdirstate.add(lfile)


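# Summary of the mapping performed by synclfdirstate() (comment added for
# illustration; states are the classic one-letter dirstate codes):
#
#     standin missing -> drop()          forget the largefile entirely
#     b'n' clean      -> normal()        record as clean
#     b'n' unsure     -> normallookup()  re-check on next status
#     b'm' merged     -> normallookup()  needs a fresh status check
#     b'r' removed    -> update_file(p1_tracked=True, wc_tracked=False)
#     b'a' added      -> add()
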
def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup during copyalltostore(), but omits a redundant check for
    # files coming from the 2nd parent, which should already exist in
    # the store after a merge.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


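# Illustrative example for getlfilestoupdate(): a changed hash contributes
# both its old and new (file, hash) tuple to the symmetric difference, but
# the file name is reported only once:
#
#     old = [(b'a.bin', b'11aa'), (b'b.bin', b'22bb')]
#     new = [(b'a.bin', b'33cc'), (b'b.bin', b'22bb')]
#     getlfilestoupdate(old, new)  # -> [b'a.bin']
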
def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit after resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should have been updated before committing.

    But the 1st commit after resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may have been
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


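# Sketch of driving this hook (illustrative; the actual wiring lives in the
# extension's command overrides, not in this module): the first commit
# after resuming refreshes standins, later commits pass through untouched.
#
#     hook = automatedcommithook(resuming=True)
#     match = hook(repo, match)  # 1st call: runs updatestandinsbymatch()
#     match = hook(repo, match)  # later calls: returns match unchanged
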
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE