largefile: respect the `normal` signature...
marmoute
r48512:95b864a6 default
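
The change itself is a one-signature fix: `dirstate.normal()` in Mercurial core accepts an optional `parentfiledata` argument, and the `largefilesdirstate` override must mirror that signature or any caller passing the argument fails with a TypeError. A minimal sketch of the issue, using hypothetical stand-in classes rather than Mercurial's actual dirstate API:

# Sketch only: illustrative stand-in classes, not Mercurial code.
class basedirstate(object):
    def normal(self, f, parentfiledata=None):
        # core callers may hand over cached (mode, size, mtime) data here
        pass


class lfdirstate(basedirstate):
    # The pre-fix override was `def normal(self, f)`; a caller supplying
    # parentfiledata would then raise:
    #   TypeError: normal() got an unexpected keyword argument 'parentfiledata'
    def normal(self, f, parentfiledata=None):
        # accept the argument for call compatibility, but discard it,
        # just as the change below does
        return super(lfdirstate, self).normal(f)


lfdirstate().normal(b'big.dat', parentfiledata=(0o644, 1024, 1700000000))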
@@ -1,789 +1,791 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

-    def normal(self, f):
+    def normal(self, f, parentfiledata=None):
+        # not sure if we should pass the `parentfiledata` down or throw it
+        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.drop(lfile)
    else:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
        if state == b'n':
            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
                # state 'n' doesn't ensure 'clean' in this case
                lfdirstate.normallookup(lfile)
            else:
                lfdirstate.normal(lfile)
        elif state == b'm':
            lfdirstate.normallookup(lfile)
        elif state == b'r':
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
        elif state == b'a':
            lfdirstate.add(lfile)


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
        lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup in copyalltostore(), but can omit a redundant check for
    # files coming from the 2nd parent, which should exist in the store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE
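
A closing note on the trade-off, as I read it (the diff itself does not spell this out): `parentfiledata` is an optimization that lets a caller hand the dirstate a pre-computed `(mode, size, mtime)` tuple so the file need not be stat'ed again. Since the override discards the value, the expected worst case is a redundant stat on a later status check, never incorrect state. Hypothetical call sites:

# Hypothetical call sites (illustrative name and values):
lfdirstate.normal(b'big.bin')  # dirstate stats the file itself
lfdirstate.normal(b'big.bin', parentfiledata=(0o644, 4096, 1700000000))
# with this change both calls are accepted; the cached data passed in
# the second call is simply thrown away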