##// END OF EJS Templates
largefile: use `update_file` for `synclfdirstate` "m" case...
marmoute -
r48525:1f3a87a7 default
parent child Browse files
Show More
@@ -1,796 +1,798 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import contextlib
12 import contextlib
13 import copy
13 import copy
14 import os
14 import os
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import hex
18 from mercurial.node import hex
19 from mercurial.pycompat import open
19 from mercurial.pycompat import open
20
20
21 from mercurial import (
21 from mercurial import (
22 dirstate,
22 dirstate,
23 encoding,
23 encoding,
24 error,
24 error,
25 httpconnection,
25 httpconnection,
26 match as matchmod,
26 match as matchmod,
27 pycompat,
27 pycompat,
28 requirements,
28 requirements,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 vfs as vfsmod,
32 vfs as vfsmod,
33 )
33 )
34 from mercurial.utils import hashutil
34 from mercurial.utils import hashutil
35
35
36 shortname = b'.hglf'
36 shortname = b'.hglf'
37 shortnameslash = shortname + b'/'
37 shortnameslash = shortname + b'/'
38 longname = b'largefiles'
38 longname = b'largefiles'
39
39
40 # -- Private worker functions ------------------------------------------
40 # -- Private worker functions ------------------------------------------
41
41
42
42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to *value*.

    The previous value (defaulting to False when the attribute does not
    exist yet) is restored on exit, even if the managed block raises.
    """
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = previous
51
51
52
52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum size above which files count as largefiles.

    The explicit *opt* value wins; otherwise, when *assumelfiles* is set,
    the ``largefiles.minsize`` config (falling back to *default*) is used.
    Aborts when the value is not numeric or no size was specified at all.
    """
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize
67
67
68
68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy.

    The destination directory is created as needed; the fallback copy is
    atomic and preserves the source's permission bits.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (unsupported fs, cross-device, ...): copy instead
        with open(src, b'rb') as srcf:
            with util.atomictempfile(dest) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
80
80
81
81
def usercachepath(ui, hash):
    """Location of largefile *hash* in the "global" (per-user) cache.

    This cache is shared between repositories, preserving both download
    bandwidth and storage space.
    """
    return os.path.join(_usercachedir(ui), hash)
88
88
89
89
def _usercachedir(ui, name=longname):
    """Return the location of the "global" largefiles cache.

    An explicitly configured ``<name>.usercache`` path always wins;
    otherwise a platform-specific default is derived from the
    environment.  Aborts (with a configuration hint where possible)
    when no location can be determined.
    """
    configured = ui.configpath(name, b'usercache')
    if configured:
        return configured

    hint = None
    env = encoding.environ

    if pycompat.iswindows:
        appdata = env.get(b'LOCALAPPDATA', env.get(b'APPDATA'))
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = env.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        xdgcache = env.get(b'XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, name)
        home = env.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138
138
139
139
def inusercache(ui, hash):
    """Return True when largefile *hash* already exists in the user cache."""
    return os.path.exists(usercachepath(ui, hash))
143
143
144
144
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.

    As a side effect, a hit in the user cache is linked into the repo
    store before its path is returned.  Returns None when the file
    cannot be found locally.
    """
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
159
159
160
160
class largefilesdirstate(dirstate.dirstate):
    """A dirstate that keys all entries by platform-neutral (unix) paths.

    Every incoming path is normalized through ``unixpath()`` before being
    handed to the base class, and ignore handling is disabled entirely.
    """

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # `parentfiledata` is deliberately discarded rather than forwarded,
        # to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
199
199
200
200
def openlfdirstate(ui, repo, create=True):
    """Return a dirstate object that tracks largefiles.

    Its root is the repo root, but it is saved in
    ``.hg/largefiles/dirstate``.  When *create* is true and no dirstate
    file exists yet, it is populated from the standins known to the main
    dirstate — this seeds it on the first meaningful largefiles
    operation in a new clone.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if standins:
            vfs.makedirs(lfstoredir)

        with lfdirstate.parentchange():
            for fstandin in standins:
                lfile = splitstandin(fstandin)
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
    return lfdirstate
239
239
240
240
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' lfdirstate entries and return the status object.

    Each unsure largefile is re-hashed and compared with its standin in
    the working directory's first parent; it is then appended to either
    the modified or the clean list, and clean entries are recorded in
    *lfdirstate*.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
            continue
        clean.append(lfile)
        lfdirstate.set_clean(lfile)
    return s
259
259
260
260
def listlfiles(repo, rev=None, matcher=None):
    """Return a list of largefiles in the working copy or changeset *rev*.

    When looking at the working copy (``rev is None``), files unknown to
    the dirstate ('?') are skipped.
    """
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is None and repo.dirstate[f] == b'?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
274
274
275
275
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    path = storepath(repo, hash, forcelocal)
    return os.path.exists(path)
279
279
280
280
def storepath(repo, hash, forcelocal=False):
    """Location of *hash* in the repository largefiles store.

    For shared repositories the store lives in the share source, unless
    *forcelocal* explicitly requests the local store.
    """
    if forcelocal or not repo.shared():
        return repo.vfs.join(longname, hash)
    return repo.vfs.reljoin(repo.sharedpath, longname, hash)
287
287
288
288
def findstorepath(repo, hash):
    """Search the local store path(s) for the file with the given hash.

    Returns ``(path, exists(path))``; when the file is found nowhere,
    the path in the primary store is returned with ``False``.  For
    shared repos the primary store is in the share source, but for
    backward compatibility the local store is also consulted.
    """
    primary = storepath(repo, hash, False)

    if instore(repo, hash):
        return (primary, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)

    return (primary, False)
305
305
306
306
def copyfromcache(repo, hash, filename):
    """Copy largefile *hash* from the repo or system cache to *filename*.

    Return True on success, or False when the file was found in neither
    cache (which should not happen: callers are expected to ensure the
    largefile is cached first) or failed its hash check — in that case
    the partially written file is removed again.
    """
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
330
330
331
331
def copytostore(repo, ctx, file, fstandin):
    """Copy largefile *file* (whose standin in *ctx* is *fstandin*) to store.

    A no-op when the store already holds the content; warns when the
    file is absent from the working directory.
    """
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    wvfs = repo.wvfs
    if not wvfs.exists(file):
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
        return
    copytostoreabsolute(repo, wvfs.join(file), hash)
344
344
345
345
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    manifest = ctx.manifest()
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in this revision's manifest
        if realfile is None or filename not in manifest:
            continue
        copytostore(repo, ctx, realfile, filename)
354
354
355
355
def copytostoreabsolute(repo, file, hash):
    """Install the file at absolute path *file* into the store as *hash*.

    Prefers hardlinking from the user cache; otherwise copies the file
    atomically into the store and then links the result back into the
    user cache.
    """
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
        return
    dst = storepath(repo, hash)
    util.makedirs(os.path.dirname(dst))
    with open(file, b'rb') as srcf:
        with util.atomictempfile(dst, createmode=repo.store.createmode) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
    linktousercache(repo, hash)
368
368
369
369
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the user cache."""
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
375
375
376
376
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    def badfn(f, msg):
        return None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
    else:
        # no patterns: relative to repo root
        pats = [wvfs.join(standindir)]
    return scmutil.match(repo[None], pats, badfn=badfn)
394
394
395
395
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher.

    The list of files in the matcher is passed through as the paths
    specified by the user.
    """
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # the file must be a standin AND its largefile must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn
    return smatcher
409
409
410
410
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() for an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Any existing separator is converted to '/' first in case
    #    the filename comes from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
422
422
423
423
def isstandin(filename):
    """Return True if *filename* is a big file standin.

    *filename* must be in Mercurial's internal form (slash-separated).
    """
    return filename.startswith(shortnameslash)
428
428
429
429
def splitstandin(filename):
    """Return the largefile path for standin *filename*, or None.

    Splits on '/' because that's what dirstate always uses, even on
    Windows; the local separator is converted to '/' first in case the
    name comes from an external source (like the command line).
    """
    parts = util.pconvert(filename).split(b'/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
439
439
440
440
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    Aborts when the largefile is missing from the working directory.
    """
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    path = repo.wjoin(lfile)
    writestandin(repo, standin, hashfile(path), getexecutable(path))
453
453
454
454
def readasstandin(fctx):
    """Read the hex hash from the given filectx of a standin file.

    This encapsulates how "standin" data is stored into the storage layer.
    """
    return fctx.data().strip()
460
460
461
461
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = b'x' if executable else b''
    repo.wwrite(standin, hash + b'\n', flags)
465
465
466
466
def copyandhash(instream, outfile):
    """Copy bytes from *instream* (iterable) to *outfile* while hashing.

    Returns the hex-encoded SHA-1 of all the data written.
    """
    hasher = hashutil.sha1(b'')
    write = outfile.write
    for data in instream:
        hasher.update(data)
        write(data)
    return hex(hasher.digest())
475
475
476
476
def hashfile(file):
    """Hex SHA-1 of *file*'s content, or b'' when the file does not exist."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
482
482
483
483
def getexecutable(filename):
    """Return True when *filename* has all three execute bits set.

    The file at *filename* counts as executable only when user, group
    and other execute permissions are all present (matching the previous
    chained-``and`` bitmask test, but returning an explicit bool instead
    of a raw bitmask int — callers only ever used the truthiness).
    """
    mode = os.stat(filename).st_mode
    required = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & required) == required
491
491
492
492
def urljoin(first, second, *arg):
    """Join URL components so exactly one b'/' separates each pair.

    A trailing slash is appended to the left piece when missing, and a
    leading slash is stripped from the right piece when present.
    """

    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
505
505
506
506
def hexsha1(fileobj):
    """Return the hex-encoded SHA-1 of the data read from *fileobj*."""
    digest = hashutil.sha1()
    for block in util.filechunkiter(fileobj):
        digest.update(block)
    return hex(digest.digest())
514
514
515
515
def httpsendfile(ui, filename):
    """Open *filename* in binary mode wrapped for HTTP upload."""
    return httpconnection.httpsendfile(ui, filename, b'rb')
518
518
519
519
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
523
523
524
524
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements:
        # any stored standin (a path under '.hglf/') marks the repo
        if any(shortnameslash in f[1] for f in repo.store.datafiles()):
            return True

    # otherwise, any entry in the largefiles dirstate counts too
    return any(openlfdirstate(repo.ui, repo, False))
533
533
534
534
class storeprotonotcapable(Exception):
    """Raised when a remote store supports none of the requested store types."""

    def __init__(self, storetypes):
        # the store types that were requested but are unavailable
        self.storetypes = storetypes
538
538
539
539
def getstandinsstate(repo):
    """Return [(lfile, hash-or-None)] for every standin tracked in dirstate.

    The hash is read from the standin in the working context; a standin
    that cannot be read (IOError) yields None for its hash.
    """
    results = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for fstandin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(fstandin)
        try:
            expectedhash = readasstandin(wctx[fstandin])
        except IOError:
            expectedhash = None
        results.append((lfile, expectedhash))
    return results
554
554
555
555
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Sync the lfdirstate entry for lfile with its standin's dirstate entry.

    If the standin is not tracked at all, drop the largefile; otherwise
    mirror the standin's state ('n'/'m'/'r'/'a') into lfdirstate.  With
    ``normallookup`` set (or a negative mtime, or a missing working-copy
    file) a clean 'n' state cannot be trusted, so the entry is marked
    possibly dirty to force a later content check.
    """
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.drop(lfile)
    else:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
        if state == b'n':
            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
                # state 'n' doesn't ensure 'clean' in this case
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
            else:
                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
        elif state == b'm':
            # merged entries go through update_file as well (replaces the
            # older normallookup() call for the 'm' case)
            lfdirstate.update_file(
                lfile, p1_tracked=True, wc_tracked=True, merged=True
            )
        elif state == b'r':
            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
        elif state == b'a':
            lfdirstate.add(lfile)
577
579
578
580
def markcommitted(orig, ctx, node):
    """Post-commit hook: sync lfdirstate entries and copy largefiles to store."""
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for fname in ctx.files():
            lfile = splitstandin(fname)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
608
610
609
611
def getlfilestoupdate(oldstandins, newstandins):
    """Return the largefile names whose (name, hash) pair differs between
    the two standin lists (symmetric difference), without duplicates.

    Note: result order follows set iteration and is not deterministic.
    """
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for lfile, _expectedhash in changed:
        if lfile not in filelist:
            filelist.append(lfile)
    return filelist
617
619
618
620
def getlfilestoupload(repo, missing, addfunc):
    """Call addfunc(standin, hash) for each standin present in the revisions
    listed in ``missing``, reporting progress per revision."""
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for idx, rev in enumerate(missing):
            progress.update(idx)
            parents = [p for p in repo[rev].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[rev]

            files = set(ctx.files())
            if len(parents) == 2:
                # at a merge, ctx.files() is not enough: also consider
                # files dropped from either parent and files that differ
                # from either parent's version
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for parentmf in (mp1, mp2):
                    files.update(f for f in parentmf if f not in mc)
                files.update(
                    f
                    for f in mc
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None)
                )
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
650
652
651
653
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        # listlfiles() only yields largefiles that exist (not
        # removed/renamed)
        for lfile in listlfiles(repo):
            if lfile not in modifiedfiles:
                continue
            fstandin = standin(lfile)
            # the standin may exist without the largefile while a rebase
            # is in progress and the working copy is not updated yet;
            # refresh only when both are present
            if repo.wvfs.exists(fstandin) and repo.wvfs.exists(lfile):
                updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # files the original matcher accepts are kept unless they are
        # largefiles; files it rejects are kept only if they are standins
        if origmatchfn(f):
            return f not in lfiles
        return f in standins

    match.matchfn = matchfn

    return match
756
758
757
759
class automatedcommithook(object):
    """Stateful hook updating standins at the 1st commit of resuming

    While automated committing (like rebase, transplant and so on) is
    running, standins should already have been updated before each
    commit, so refreshing them again would waste time.

    The first commit after resuming such an operation (e.g. ``rebase
    --continue``) is the exception: largefiles may have been modified
    manually in the meantime, so standins are refreshed exactly once.
    """

    def __init__(self, resuming):
        # True until the first call after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        self.resuming = False  # avoids updating at subsequent commits
        return updatestandinsbymatch(repo, match)
779
781
780
782
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now