largefiles: remove the `changing_parents` context in `openlfdirstate`...
Author: marmoute
Changeset: r50915:e2f3cba6 (default)
@@ -1,811 +1,810 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue

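# A minimal usage sketch (illustrative only, not part of the original file):
# the context manager above temporarily flips the ``lfstatus`` flag so that
# status-like operations inside the block see largefiles instead of their
# standins, and the old value is restored even if the block raises:
#
#     with lfstatus(repo):
#         modified = repo.status().modified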

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back to an atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


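# Illustrative configuration note (an assumption for exposition, not code
# from this changeset): the environment-variable probing above is only a
# fallback; the cache location can be pinned explicitly in an hgrc:
#
#     [largefiles]
#     usercache = /path/to/shared/largefiles/cache
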
def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

-                with lfdirstate.changing_parents(repo):
-                    for standin in standins:
-                        lfile = splitstandin(standin)
-                        lfdirstate.hacky_extension_update_file(
-                            lfile,
-                            p1_tracked=True,
-                            wc_tracked=True,
-                            possibly_dirty=True,
-                        )
+                for standin in standins:
+                    lfile = splitstandin(standin)
+                    lfdirstate.hacky_extension_update_file(
+                        lfile,
+                        p1_tracked=True,
+                        wc_tracked=True,
+                        possibly_dirty=True,
+                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate


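# Note on the hunk above (the substance of this changeset): the population
# loop previously ran inside ``lfdirstate.changing_parents(repo)``. Seeding
# a brand-new largefiles dirstate from already-tracked standins does not
# modify any parents, so the context manager appears unnecessary and was
# removed, leaving the ``hacky_extension_update_file`` calls to run directly
# under the wlock.
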
def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


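# Illustrative summary (not part of the original file): in a repository
# created with ``hg share``, the primary store lives in the share source, so
#
#     path, exists = findstorepath(repo, hash)
#
# first checks the shared store, then falls back to the local
# .hg/largefiles directory for backward compatibility, and finally reports
# the primary path with exists=False when neither has the file.
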
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


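# Worked example (illustrative, not part of the original file) of the standin
# path convention implemented by the three helpers above:
#
#     standin(b'data/big.bin')            == b'.hglf/data/big.bin'
#     isstandin(b'.hglf/data/big.bin')    is True
#     splitstandin(b'.hglf/data/big.bin') == b'data/big.bin'
#     splitstandin(b'data/big.bin')       is None
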
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


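# Worked example (illustrative): redundant slashes at the seams are
# normalized, so
#
#     urljoin(b'http://example.com/', b'/largefiles', b'abc123')
#
# yields b'http://example.com/largefiles/abc123'.
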
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the given
    file-like object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


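# Worked example (illustrative): hexsha1() hashes whatever the file-like
# object yields, so an empty stream hashes to the well-known empty-input
# SHA-1:
#
#     hexsha1(io.BytesIO(b''))
#     == b'da39a3ee5e6b4b0d3255bfef95601890afd80709'
#
# hashfile() earlier in this file instead returns b'' (not a hash) when the
# path does not exist.
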
def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup during copyalltostore(), but can omit a redundant check for
    # files coming from the 2nd parent, which should already exist in the
    # store after a merge.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE