##// END OF EJS Templates
large-files: use a `changing_files` context when initializing the dirstate...
marmoute -
r51048:98890baf default
parent child Browse files
Show More
@@ -1,825 +1,823 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from mercurial.pycompat import open
18 from mercurial.pycompat import open
19
19
20 from mercurial import (
20 from mercurial import (
21 dirstate,
21 dirstate,
22 encoding,
22 encoding,
23 error,
23 error,
24 httpconnection,
24 httpconnection,
25 match as matchmod,
25 match as matchmod,
26 pycompat,
26 pycompat,
27 requirements,
27 requirements,
28 scmutil,
28 scmutil,
29 sparse,
29 sparse,
30 util,
30 util,
31 vfs as vfsmod,
31 vfs as vfsmod,
32 )
32 )
33 from mercurial.utils import hashutil
33 from mercurial.utils import hashutil
34 from mercurial.dirstateutils import timestamp
34 from mercurial.dirstateutils import timestamp
35
35
36 shortname = b'.hglf'
36 shortname = b'.hglf'
37 shortnameslash = shortname + b'/'
37 shortnameslash = shortname + b'/'
38 longname = b'largefiles'
38 longname = b'largefiles'
39
39
40 # -- Private worker functions ------------------------------------------
40 # -- Private worker functions ------------------------------------------
41
41
42
42
@contextlib.contextmanager
def lfstatus(repo, value=True):
    """Temporarily set ``repo.lfstatus`` to *value* for the duration of the
    context, restoring whatever was there before (default ``False``) on exit."""
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = previous
51
51
52
52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum largefile size as a float.

    *opt* (a command-line value) wins; otherwise, when *assumelfiles* is
    set, fall back to the ``largefiles.minsize`` config (or *default*).
    Aborts when the value is not numeric or no size was specified at all.
    """
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize
67
67
68
68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlinking failed (cross-device, unsupported FS, ...);
        # fall back on an atomic copy and mirror the source's mode bits
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)
80
80
81
81
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)
88
88
89
89
def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    # explicit configuration always wins
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        # prefer LOCALAPPDATA, fall back to APPDATA
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        # honour the XDG base-directory spec before ~/.cache
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    # a platform branch matched but no usable environment variable was found
    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
138
138
139
139
def inusercache(ui, hash):
    """Return True if the largefile with this hash exists in the user cache."""
    return os.path.exists(usercachepath(ui, hash))
143
143
144
144
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # populate the repo store from the user cache as a side effect
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
159
159
160
160
class largefilesdirstate(dirstate.dirstate):
    """A dirstate subclass tracking largefiles.

    All path arguments are normalized with :func:`unixpath` before being
    handed to the base class, and ignore handling is disabled entirely.
    """

    _large_file_dirstate = True
    _tr_key_suffix = b'-large-files'

    def __getitem__(self, key):
        return super().__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super().set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super().set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super().normal(unixpath(f))

    def remove(self, f):
        return super().remove(unixpath(f))

    def add(self, f):
        return super().add(unixpath(f))

    def drop(self, f):
        return super().drop(unixpath(f))

    def forget(self, f):
        return super().forget(unixpath(f))

    def normallookup(self, f):
        return super().normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never ignored
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super().write(None)
204
204
205
205
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            # use a `changing_files` context so writes happen through the
            # normal dirstate machinery instead of an explicit write()
            with repo.wlock(wait=False), lfdirstate.changing_files(repo):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate
261
259
262
260
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' entries of the largefiles dirstate status.

    Files whose content hash no longer matches their standin are moved to
    ``modified``; the rest are marked clean (and their stat data cached
    when the mtime is reliable).  Returns the adjusted status object.
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s
288
286
289
287
def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]
303
301
304
302
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))
308
306
309
307
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    # shared repositories keep a single store in the share source
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
316
314
317
315
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)

    return (path, False)
334
332
335
333
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # content did not round-trip: remove the corrupt copy
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
359
357
360
358
def copytostore(repo, ctx, file, fstandin):
    """Copy one largefile (identified by its standin in *ctx*) into the store,
    unless it is already there or missing from the working copy."""
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
373
371
374
372
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only act on standins that still exist in the manifest
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
383
381
384
382
def copytostoreabsolute(repo, file, hash):
    """Copy *file* (an absolute path) into the repo store under *hash*,
    preferring a hardlink from the user cache when available."""
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
397
395
398
396
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)
404
402
405
403
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase the user's patterns under the standin directory
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        return scmutil.match(repo[None], pats, badfn=badfn)
    # no patterns: relative to repo root
    return scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
423
421
424
422
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher.  Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # a path matches iff it is a standin AND its real name matches
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher
438
436
439
437
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
451
449
452
450
def isstandin(filename):
    """Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)
457
455
458
456
def splitstandin(filename):
    """Return the real file name for a standin path, or None if *filename*
    is not a standin."""
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
468
466
469
467
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
482
480
483
481
def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()
489
487
490
488
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # the standin carries the hash plus a trailing newline; mirror the
    # largefile's executable bit on the standin itself
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
494
492
495
493
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way.  Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())
504
502
505
503
def hashfile(file):
    """Return the hex SHA-1 of *file*'s content, or b'' if it is missing."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
511
509
512
510
def getexecutable(filename):
    """Return a truthy value iff *filename* is executable by user,
    group, AND other.

    Mirrors the original short-circuiting ``and`` chain of mode-bit
    tests, so the result is an int bitmask (0 or a single x-bit), not
    a bool — callers only rely on its truthiness.
    """
    mode = os.stat(filename).st_mode
    result = mode & stat.S_IXUSR
    if result:
        result = mode & stat.S_IXGRP
    if result:
        result = mode & stat.S_IXOTH
    return result
520
518
521
519
def urljoin(first, second, *arg):
    """Join two or more URL components with exactly one b'/' between
    consecutive components.

    A missing separator is added to the left component and a leading
    slash is stripped from the right one, so b'a/' + b'/b' still
    yields b'a/b'.
    """

    def join(left, right):
        sep = b'' if left.endswith(b'/') else b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + sep + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
534
532
535
533
def hexsha1(fileobj):
    """Return the hex-encoded SHA-1 of all data readable from the
    file-like object *fileobj*, consumed chunk by chunk."""
    digest = hashutil.sha1()
    for block in util.filechunkiter(fileobj):
        digest.update(block)
    return hex(digest.digest())
543
541
544
542
def httpsendfile(ui, filename):
    """Wrap *filename* in an httpsendfile object opened for binary
    read, suitable for uploading over HTTP."""
    mode = b'rb'
    return httpconnection.httpsendfile(ui, filename, mode)
547
545
548
546
def unixpath(path):
    """Return *path* normalized for use with the lfdirstate (collapsed
    with normpath, then converted to forward slashes)."""
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
552
550
553
551
def islfilesrepo(repo):
    """Return true if *repo* is a largefiles repo.

    The repo qualifies when it carries the 'largefiles' requirement
    and some store file lives under the standin directory; failing
    that, when its largefiles dirstate tracks anything at all.
    """
    if b'largefiles' in repo.requirements:
        for entry in repo.store.datafiles():
            if shortnameslash in entry[1]:
                return True
    return any(openlfdirstate(repo.ui, repo, False))
562
560
563
561
class storeprotonotcapable(Exception):
    """Raised when no store supports any of the required protocols.

    ``storetypes`` records the store types that were considered, so the
    caller can report what was unsupported.
    """

    def __init__(self, storetypes):
        self.storetypes = storetypes
567
565
568
566
def getstandinsstate(repo):
    """Return a list of (largefile, hash) pairs for every standin the
    dirstate knows about; hash is None when the standin is unreadable."""
    result = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    walked = repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    )
    for fstandin in walked:
        lfile = splitstandin(fstandin)
        try:
            hash = readasstandin(wctx[fstandin])
        except IOError:
            # unreadable standin: remember the file with an unknown hash
            hash = None
        result.append((lfile, hash))
    return result
583
581
584
582
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Mirror the dirstate entry of *lfile*'s standin into *lfdirstate*.

    When the standin is known to the repo dirstate, its
    tracked/p1/p2 state is copied onto the largefile's entry and the
    entry is marked possibly dirty; otherwise the largefile is marked
    untracked.  (*normallookup* is unused here but kept for interface
    compatibility with callers.)
    """
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )
    else:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
602
600
603
601
def markcommitted(orig, ctx, node):
    """Largefiles wrapper around the original markcommitted (``orig``).

    Runs ``orig(node)`` while the dirstate is inside a changing-parents
    context, mirrors the state of every standin touched by ``ctx`` into
    the largefiles dirstate, and finally copies the committed
    largefiles into the local store/cache.
    """
    repo = ctx.repo()

    # keep the dirstate's changing-parents context open across both
    # orig() and the lfdirstate synchronization below
    with repo.dirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        lfdirstate = openlfdirstate(repo.ui, repo)
        for f in ctx.files():
            # splitstandin() returns None for paths outside .hglf/
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files comming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
632
630
633
631
def getlfilestoupdate(oldstandins, newstandins):
    """Return the largefile names whose standin entries differ between
    *oldstandins* and *newstandins*.

    Each argument is a list of ``(lfile, hash)`` pairs as produced by
    getstandinsstate().  Every affected name appears exactly once in
    the result (order follows the symmetric-difference set's iteration
    order, as before).
    """
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track already-emitted names in a set: the original tested
    # membership against the growing list, which is O(n^2)
    seen = set()
    for lfile, _hash in changedstandins:
        if lfile not in seen:
            seen.add(lfile)
            filelist.append(lfile)
    return filelist
641
639
642
640
def getlfilestoupload(repo, missing, addfunc):
    """For each revision in *missing*, call ``addfunc(standin, hash)``
    for every standin it touches (including merge-only differences),
    reporting progress through the ui."""
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                # merge: also pick up files removed relative to either
                # parent, and files whose content differs from at least
                # one parent
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                files.update(f for f in mp1 if f not in mc)
                files.update(f for f in mp2 if f not in mc)
                files.update(
                    f
                    for f in mc
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None)
                )
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
674
672
675
673
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.

    ``match`` may be None (treated like an always-matcher); the
    returned matcher matches regular files plus the standins of any
    largefiles the user asked to commit.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        dirtymatch = matchmod.always()
        with repo.dirstate.running_status(repo):
            lfdirstate = openlfdirstate(ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                dirtymatch,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=False,
            )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    # a file matches if it matched originally and is not a largefile,
    # or if it is one of the standins we just refreshed
    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
785
783
786
784
class automatedcommithook:
    """One-shot hook that refreshes standins on the first commit after
    resuming an automated command.

    While automated committing (rebase, transplant, and so on) is in
    progress, standins are expected to already be up to date, so
    updating them at each commit would be wasted work.  The first
    commit after resuming (e.g. ``rebase --continue``) is the
    exception, because largefiles may have been modified manually in
    the meantime: standins are refreshed exactly once, after which the
    hook becomes a pass-through.
    """

    def __init__(self, resuming):
        # whether the next commit is the first one after resuming
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # only the very first commit after resuming updates standins
        self.resuming = False
        return updatestandinsbymatch(repo, match)
808
806
809
807
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function used to write largefiles-specific status.

    With ``forcibly=None`` on a largefiles-enabled repo, the current
    (last registered) writer from ``repo._lfstatuswriters`` is
    returned as the default.  Otherwise a truthy ``forcibly`` forces
    writing through ``ui.status`` and a falsy one silently discards
    every status message.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
General Comments 0
You need to be logged in to leave comments. Login now