large-files: use `running_status` in `updatestandinsbymatch`...
marmoute
r51032:30277209 default
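
This changeset moves the opening of the largefiles dirstate and its status() call inside the main dirstate's `running_status` context manager. A minimal sketch of the pattern being adopted, extracted from the hunk in `updatestandinsbymatch` below (`repo`, `ui`, `matchmod`, and `openlfdirstate` are the names used in this module; this is an illustration, not a drop-in replacement for the function):

    # Sketch: declare a status run on the main dirstate, then open and
    # query the largefiles sub-dirstate inside that context.
    dirtymatch = matchmod.always()
    with repo.dirstate.running_status(repo):
        lfdirstate = openlfdirstate(ui, repo)
        unsure, s, mtime_boundary = lfdirstate.status(
            dirtymatch,
            subrepos=[],
            ignored=False,
            clean=False,
            unknown=False,
        )
    # `unsure` lists files whose state could not be decided from cached
    # data alone; `s` carries the modified/added/removed lists used below.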
@@ -1,820 +1,825 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    _large_file_dirstate = True
    _tr_key_suffix = b'-large-files'

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
                # avoid getting dirty dirstate before other operations
                lfdirstate.write(repo.currenttransaction())
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    with repo.dirstate.changing_parents(repo):
        orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
-        lfdirstate = openlfdirstate(ui, repo)
         dirtymatch = matchmod.always()
-        unsure, s, mtime_boundary = lfdirstate.status(
-            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
-        )
+        with repo.dirstate.running_status(repo):
+            lfdirstate = openlfdirstate(ui, repo)
+            unsure, s, mtime_boundary = lfdirstate.status(
+                dirtymatch,
+                subrepos=[],
+                ignored=False,
+                clean=False,
+                unknown=False,
+            )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE