largefiles: sync up `largefilesdirstate` methods with `dirstate` base class...
Matt Harbison
r52700:0b2c978f default
@@ -1,826 +1,808 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue

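The context manager above only swaps a boolean attribute on the repo object, so its behavior is easy to see in isolation. A minimal sketch (not part of the changeset; `FakeRepo` is a made-up stand-in, since the helper only touches one attribute):

class FakeRepo:
    pass

repo = FakeRepo()
with lfstatus(repo):
    assert repo.lfstatus is True    # enabled inside the block
assert repo.lfstatus is False       # restored to the default afterwards
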
def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on an atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)

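link() prefers a hardlink because the store and the user cache usually sit on the same filesystem, and it falls back to copy-then-rename when they don't. The same pattern reduced to the standard library (a sketch of the technique, not the helper itself; it assumes `dest` includes a directory component):

import os, shutil, tempfile

def link_or_copy(src, dest):
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    try:
        os.link(src, dest)                      # hardlink when possible
    except OSError:
        # copy to a temp file in the target directory, then rename into place
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dest))
        os.close(fd)
        shutil.copyfile(src, tmp)
        os.replace(tmp, dest)
        shutil.copymode(src, dest)
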
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)

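_usercachedir resolves the cache root with an explicit precedence: the `largefiles.usercache` config first, then per-platform conventions. A condensed sketch of just the POSIX branch (the real helper also covers Windows and macOS and aborts with a hint when nothing resolves):

import os

def posix_cache_dir(name='largefiles'):
    xdg = os.environ.get('XDG_CACHE_HOME')
    if xdg:
        return os.path.join(xdg, name)             # $XDG_CACHE_HOME/largefiles
    home = os.environ.get('HOME')
    if home:
        return os.path.join(home, '.cache', name)  # ~/.cache/largefiles
    return None
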
def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

class largefilesdirstate(dirstate.dirstate):
    _large_file_dirstate = True
    _tr_key_suffix = b'-large-files'

-    def __getitem__(self, key):
-        return super(largefilesdirstate, self).__getitem__(unixpath(key))
+    # XXX: why are there overrides to fix the path, if the path should already
+    # be in unix form for the superclass?

-    def set_tracked(self, f):
-        return super(largefilesdirstate, self).set_tracked(unixpath(f))
+    def set_tracked(self, f, reset_copy=False):
+        return super(largefilesdirstate, self).set_tracked(
+            unixpath(f), reset_copy=reset_copy
+        )

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

-    def normal(self, f, parentfiledata=None):
-        # not sure if we should pass the `parentfiledata` down or throw it
-        # away. So throwing it away to stay on the safe side.
-        return super(largefilesdirstate, self).normal(unixpath(f))
-
-    def remove(self, f):
-        return super(largefilesdirstate, self).remove(unixpath(f))
-
-    def add(self, f):
-        return super(largefilesdirstate, self).add(unixpath(f))
-
-    def drop(self, f):
-        return super(largefilesdirstate, self).drop(unixpath(f))
-
-    def forget(self, f):
-        return super(largefilesdirstate, self).forget(unixpath(f))
-
-    def normallookup(self, f):
-        return super(largefilesdirstate, self).normallookup(unixpath(f))
-
-    def _ignore(self, f):
+    def _dirignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)

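Every surviving override funnels the incoming path through unixpath() (defined later in this file) before delegating, so the sub-dirstate always sees slash-separated, normalized keys. The shape of that pattern in isolation (a sketch with a toy base class, unrelated to the Mercurial API):

import os

def _norm(path):
    return os.path.normpath(path).replace(os.sep, '/')

class Base:
    def set_tracked(self, f):
        print('tracking', f)

class Normalizing(Base):
    def set_tracked(self, f):
        # normalize before handing the path to the base class
        return super().set_tracked(_norm(f))

Normalizing().set_tracked('a/./b')  # prints: tracking a/b
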
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False), lfdirstate.changing_files(repo):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate

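A minimal way to reach this sub-dirstate from a script (a sketch; it assumes it runs at the root of a repository where the largefiles extension is in use, so the sub-dirstate is meaningful):

from mercurial import hg, ui as uimod
from hgext.largefiles import lfutil

ui = uimod.ui.load()
repo = hg.repository(ui, b'.')                              # repo in cwd
lfdirstate = lfutil.openlfdirstate(ui, repo, create=False)  # don't create it
for f in lfdirstate:                                        # tracked largefiles
    ui.write(b'%s\n' % f)
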
def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s

def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)

def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

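Callers get a usable path even on a miss, which keeps the common pattern cheap (a usage sketch; `repo` and `hash` assumed from the surrounding code):

path, exists = findstorepath(repo, hash)
if not exists:
    pass  # fetch or link the largefile into `path` before reading it
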
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match

def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher._was_tampered_with = True
    smatcher.matchfn = composedmatchfn

    return smatcher

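The composition is plain predicate AND-ing: a path must look like a standin, and, once the `.hglf/` prefix is stripped, must still satisfy the user's matcher. The same idea without any Mercurial types (a sketch):

def compose(is_standin, strip_prefix, user_match):
    def matchfn(f):
        return is_standin(f) and user_match(strip_prefix(f))
    return matchfn

m = compose(
    lambda f: f.startswith('.hglf/'),
    lambda f: f[len('.hglf/'):],
    lambda f: f.endswith('.bin'),
)
assert m('.hglf/data/big.bin') and not m('data/big.bin')
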
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

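These three helpers are pure string manipulation around the `.hglf/` prefix, so their round-trip behavior follows directly from shortname = b'.hglf' (doctest-style, with the helpers above in scope):

assert standin(b'data/big.bin') == b'.hglf/data/big.bin'
assert isstandin(b'.hglf/data/big.bin')
assert splitstandin(b'.hglf/data/big.bin') == b'data/big.bin'
assert splitstandin(b'data/big.bin') is None    # not under .hglf/
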
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')

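So a standin on disk is just the 40-character hex SHA-1 of the big file plus a trailing newline, with its executable bit mirroring the real file; readasstandin() strips the newline back off. The round trip on raw bytes (a sketch; the hex string is a placeholder, not a hash of anything in particular):

content = b'd0be2dc421be4fcd0172e5afceea3970e2f3d940' + b'\n'   # what writestandin stores
assert content.strip() == b'd0be2dc421be4fcd0172e5afceea3970e2f3d940'  # what readasstandin yields
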
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())

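Because instream is just an iterable of byte chunks, the helper can be exercised with in-memory buffers (a sketch, with the module's `util` import in scope):

import hashlib, io

src = io.BytesIO(b'some payload')
dst = io.BytesIO()
digest = copyandhash(util.filechunkiter(src), dst)
assert dst.getvalue() == b'some payload'
assert digest == hashlib.sha1(b'some payload').hexdigest().encode('ascii')
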
def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )

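Note that getexecutable() only reports "executable" when all three execute bits (user, group, other) are set; updatestandin() feeds this result to writestandin() as the b'x' flag. On a POSIX system (a sketch; chmod bits behave differently on Windows):

import os, stat, tempfile

fd, p = tempfile.mkstemp()
os.close(fd)
os.chmod(p, 0o755)        # rwxr-xr-x: all three 'x' bits set
assert getexecutable(p)
os.chmod(p, 0o744)        # rwxr--r--: group/other lack 'x'
assert not getexecutable(p)
os.unlink(p)
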
def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

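The inner join() normalizes each seam to exactly one slash, regardless of what the two sides bring (doctest-style; follows directly from the code above):

assert urljoin(b'http://host', b'path') == b'http://host/path'
assert urljoin(b'http://host/', b'/path', b'file') == b'http://host/path/file'
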
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

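This is the normalizer the largefilesdirstate overrides rely on: normpath collapses '.' and '..' segments, and pconvert rewrites the OS separator to '/'. For simple inputs the result is platform-independent (a sketch):

assert unixpath(b'./a/b/../c') == b'a/c'    # same on POSIX and Windows
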
def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements:
        for entry in repo.store.data_entries():
            if entry.is_revlog and shortnameslash in entry.target_id:
                return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    with repo.dirstate.changing_parents(repo):
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        lfdirstate = openlfdirstate(repo.ui, repo)
        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup while copyalltostore() runs, but it can omit the redundant
    # check for files coming from the 2nd parent, which should already
    # exist in the store at merging.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

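The symmetric difference keeps any (lfile, hash) pair that appears on only one side, and the loop then deduplicates down to filenames; set iteration makes the result order arbitrary. For example (a sketch):

old = [(b'a', b'h1'), (b'b', b'h2')]
new = [(b'a', b'h1'), (b'b', b'h3'), (b'c', b'h4')]
assert sorted(getlfilestoupdate(old, new)) == [b'b', b'c']
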
def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))

def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        dirtymatch = matchmod.always()
        with repo.dirstate.running_status(repo):
            lfdirstate = openlfdirstate(ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                dirtymatch,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=False,
            )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._was_tampered_with = True
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    match._was_tampered_with = True
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

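The hook is called once per commit and deliberately fires only on the first call (a sketch of the calling pattern; `repo` and `match` assumed from context):

hook = automatedcommithook(resuming=True)
match = hook(repo, match)   # 1st commit after e.g. `rebase --continue`: refreshes standins
match = hook(repo, match)   # later commits: the match passes through untouched
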
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and hasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE