largefile: use `update_file` for `synclfdirstate` "n" case...
marmoute
r48524:b0a39b66 default
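The substantive change is confined to `synclfdirstate`: in the `state == b'n'` branch, the legacy `lfdirstate.normallookup(lfile)` call is replaced by the newer, explicit `update_file` API. A minimal before/after sketch (the annotations are editorial, not part of the commit):

    # before: legacy dirstate API; "tracked, but cleanliness must be re-checked"
    lfdirstate.normallookup(lfile)

    # after: the same intent stated explicitly through update_file()
    lfdirstate.update_file(
        lfile,
        p1_tracked=True,      # present in the first parent
        wc_tracked=True,      # tracked in the working copy
        possibly_dirty=True,  # not known clean; a later status must look it up
    )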
@@ -1,794 +1,796 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        with lfdirstate.parentchange():
            for standin in standins:
                lfile = splitstandin(standin)
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.set_clean(lfile)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.drop(lfile)
    else:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
        if state == b'n':
            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
                # state 'n' doesn't ensure 'clean' in this case
-                lfdirstate.normallookup(lfile)
+                lfdirstate.update_file(
+                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+                )
            else:
                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
        elif state == b'm':
            lfdirstate.normallookup(lfile)
        elif state == b'r':
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
        elif state == b'a':
            lfdirstate.add(lfile)


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE
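For reference, an editorial summary (not part of the commit) of how `synclfdirstate` maps the standin's recorded dirstate entry onto lfdirstate calls after this change; note that the 'm' and 'a' branches still use the legacy API at this revision:

    standin not in repo.dirstate -> lfdirstate.drop(lfile)
    'n', possibly dirty          -> lfdirstate.update_file(lfile, p1_tracked=True,
                                        wc_tracked=True, possibly_dirty=True)
    'n', believed clean          -> lfdirstate.update_file(lfile, p1_tracked=True,
                                        wc_tracked=True)
    'm' (merged)                 -> lfdirstate.normallookup(lfile)   # legacy API
    'r' (removed)                -> lfdirstate.update_file(lfile, p1_tracked=True,
                                        wc_tracked=False)
    'a' (added)                  -> lfdirstate.add(lfile)            # legacy API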