store: use StoreEntry API instead of parsing filename in largefile...
marmoute
r51381:b4a9c8f1 default
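The change below is to `islfilesrepo()` in largefiles' utility module (`lfutil.py`, per its docstring): rather than substring-matching the `.hglf/` standin prefix against each store entry's raw on-disk name (`entry.unencoded_path`), it asks the entry what it is (`entry.is_revlog`) and what it tracks (`entry.target_id`). A minimal sketch of the before/after logic, assuming only the three StoreEntry attributes visible in this diff; `FakeEntry` and both helper functions are hypothetical stand-ins for illustration, not Mercurial code:

    shortnameslash = b'.hglf/'

    class FakeEntry:
        """Hypothetical stand-in for a store entry yielded by
        repo.store.datafiles(); models only the attributes used here."""

        def __init__(self, unencoded_path, target_id, is_revlog=True):
            self.unencoded_path = unencoded_path  # on-disk name, e.g. b'data/.hglf/big.bin.i'
            self.target_id = target_id  # tracked path, e.g. b'.hglf/big.bin'
            self.is_revlog = is_revlog

    def has_largefile_old(entries):
        # before: parse the raw file name for the standin prefix
        return any(shortnameslash in e.unencoded_path for e in entries)

    def has_largefile_new(entries):
        # after: use the entry's own API - check the entry kind and
        # match against the tracked path instead of the on-disk name
        return any(
            e.is_revlog and shortnameslash in e.target_id for e in entries
        )

    entries = [FakeEntry(b'data/.hglf/big.bin.i', b'.hglf/big.bin')]
    assert has_largefile_old(entries) and has_largefile_new(entries)

Both checks agree on this sample; the new form no longer depends on how the store spells file names on disk.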
@@ -1,824 +1,823 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from mercurial.dirstateutils import timestamp

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    _large_file_dirstate = True
    _tr_key_suffix = b'-large-files'

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    If a dirstate object already exists and is being used for a 'changing_*'
    context, it will be returned.
    """
    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
    if sub_dirstate is not None:
        return sub_dirstate
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        try:
            with repo.wlock(wait=False), lfdirstate.changing_files(repo):
                matcher = getstandinmatcher(repo)
                standins = repo.dirstate.walk(
                    matcher, subrepos=[], unknown=False, ignored=False
                )

                if len(standins) > 0:
                    vfs.makedirs(lfstoredir)

                for standin in standins:
                    lfile = splitstandin(standin)
                    lfdirstate.hacky_extension_update_file(
                        lfile,
                        p1_tracked=True,
                        wc_tracked=True,
                        possibly_dirty=True,
                    )
        except error.LockError:
            # Assume that whatever was holding the lock was important.
            # If we were doing something important, we would already have
            # either the lock or a largefile dirstate.
            pass
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s, mtime_boundary = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    wctx = repo[None]
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            st = wctx[lfile].lstat()
            mode = st.st_mode
            size = st.st_size
            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
            if mtime is not None:
                cache_data = (mode, size, mtime)
                lfdirstate.set_clean(lfile, cache_data)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
-    if b'largefiles' in repo.requirements and any(
-        shortnameslash in entry.unencoded_path
-        for entry in repo.store.datafiles()
-    ):
-        return True
+    if b'largefiles' in repo.requirements:
+        for entry in repo.store.datafiles():
+            if entry.is_revlog and shortnameslash in entry.target_id:
+                return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.hacky_extension_update_file(
            lfile,
            p1_tracked=False,
            wc_tracked=False,
        )
    else:
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.hacky_extension_update_file(
            lfile,
            wc_tracked=entry.tracked,
            p1_tracked=entry.p1_tracked,
            p2_info=entry.p2_info,
            possibly_dirty=True,
        )


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    with repo.dirstate.changing_parents(repo):
        orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        dirtymatch = matchmod.always()
        with repo.dirstate.running_status(repo):
            lfdirstate = openlfdirstate(ui, repo)
            unsure, s, mtime_boundary = lfdirstate.status(
                dirtymatch,
                subrepos=[],
                ignored=False,
                clean=False,
                unknown=False,
            )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook:
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE
@@ -1,470 +1,474 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10
10
11 import copy
11 import copy
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14
14
15 from mercurial import (
15 from mercurial import (
16 error,
16 error,
17 extensions,
17 extensions,
18 localrepo,
18 localrepo,
19 match as matchmod,
19 match as matchmod,
20 scmutil,
20 scmutil,
21 util,
21 util,
22 )
22 )
23
23
24 from mercurial.dirstateutils import timestamp
24 from mercurial.dirstateutils import timestamp
25
25
26 from . import (
26 from . import (
27 lfcommands,
27 lfcommands,
28 lfutil,
28 lfutil,
29 )
29 )
30
30
31
31
32 def reposetup(ui, repo):
32 def reposetup(ui, repo):
33 # wire repositories should be given new wireproto functions
33 # wire repositories should be given new wireproto functions
34 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
34 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
35 if not repo.local():
35 if not repo.local():
36 return
36 return
37
37
38 class lfilesrepo(repo.__class__):
38 class lfilesrepo(repo.__class__):
39 # the mark to examine whether "repo" object enables largefiles or not
39 # the mark to examine whether "repo" object enables largefiles or not
40 _largefilesenabled = True
40 _largefilesenabled = True
41
41
42 lfstatus = False
42 lfstatus = False
43
43
44 # When lfstatus is set, return a context that gives the names
44 # When lfstatus is set, return a context that gives the names
45 # of largefiles instead of their corresponding standins and
45 # of largefiles instead of their corresponding standins and
46 # identifies the largefiles as always binary, regardless of
46 # identifies the largefiles as always binary, regardless of
47 # their actual contents.
47 # their actual contents.
48 def __getitem__(self, changeid):
48 def __getitem__(self, changeid):
49 ctx = super(lfilesrepo, self).__getitem__(changeid)
49 ctx = super(lfilesrepo, self).__getitem__(changeid)
50 if self.lfstatus:
50 if self.lfstatus:
51
51
52 def files(orig):
52 def files(orig):
53 filenames = orig()
53 filenames = orig()
54 return [lfutil.splitstandin(f) or f for f in filenames]
54 return [lfutil.splitstandin(f) or f for f in filenames]
55
55
56 extensions.wrapfunction(ctx, 'files', files)
56 extensions.wrapfunction(ctx, 'files', files)
57
57
58 def manifest(orig):
58 def manifest(orig):
59 man1 = orig()
59 man1 = orig()
60
60
61 class lfilesmanifest(man1.__class__):
61 class lfilesmanifest(man1.__class__):
62 def __contains__(self, filename):
62 def __contains__(self, filename):
63 orig = super(lfilesmanifest, self).__contains__
63 orig = super(lfilesmanifest, self).__contains__
64 return orig(filename) or orig(
64 return orig(filename) or orig(
65 lfutil.standin(filename)
65 lfutil.standin(filename)
66 )
66 )
67
67
68 man1.__class__ = lfilesmanifest
68 man1.__class__ = lfilesmanifest
69 return man1
69 return man1
70
70
71 extensions.wrapfunction(ctx, 'manifest', manifest)
71 extensions.wrapfunction(ctx, 'manifest', manifest)
72
72
73 def filectx(orig, path, fileid=None, filelog=None):
73 def filectx(orig, path, fileid=None, filelog=None):
74 try:
74 try:
75 if filelog is not None:
75 if filelog is not None:
76 result = orig(path, fileid, filelog)
76 result = orig(path, fileid, filelog)
77 else:
77 else:
78 result = orig(path, fileid)
78 result = orig(path, fileid)
79 except error.LookupError:
79 except error.LookupError:
80 # Adding a null character will cause Mercurial to
80 # Adding a null character will cause Mercurial to
81 # identify this as a binary file.
81 # identify this as a binary file.
82 if filelog is not None:
82 if filelog is not None:
83 result = orig(lfutil.standin(path), fileid, filelog)
83 result = orig(lfutil.standin(path), fileid, filelog)
84 else:
84 else:
85 result = orig(lfutil.standin(path), fileid)
85 result = orig(lfutil.standin(path), fileid)
86 olddata = result.data
86 olddata = result.data
87 result.data = lambda: olddata() + b'\0'
87 result.data = lambda: olddata() + b'\0'
88 return result
88 return result
89
89
90 extensions.wrapfunction(ctx, 'filectx', filectx)
90 extensions.wrapfunction(ctx, 'filectx', filectx)
91
91
92 return ctx
92 return ctx
93
93
        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        # XXX large file status is buggy when used on repo proxy.
        # XXX this needs to be investigated.
        @localrepo.unfilteredmethod
        def status(
            self,
            node1=b'.',
            node2=None,
            match=None,
            ignored=False,
            clean=False,
            unknown=False,
            listsubrepos=False,
        ):
            listignored, listclean, listunknown = ignored, clean, unknown
            orig = super(lfilesrepo, self).status
            if not self.lfstatus:
                return orig(
                    node1,
                    node2,
                    match,
                    listignored,
                    listclean,
                    listunknown,
                    listsubrepos,
                )

            # some calls in this function rely on the old version of status
            self.lfstatus = False
            ctx1 = self[node1]
            ctx2 = self[node2]
            working = ctx2.rev() is None
            parentworking = working and ctx1 == self[b'.']

            if match is None:
                match = matchmod.always()

            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                wlock = self.wlock(False)
                gotlock = True
            except error.LockError:
                wlock = util.nullcontextmanager()
                gotlock = False
            with wlock, self.dirstate.running_status(self):

                # First check if paths or patterns were specified on the
                # command line. If there were, and they don't match any
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if not match.always():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return orig(
                            node1,
                            node2,
                            match,
                            listignored,
                            listclean,
                            listunknown,
                            listsubrepos,
                        )

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = self.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif dirstate.hasdir(sf):
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

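                # For illustration (names hypothetical): if b'big.bin' is
                # tracked as a largefile and b'plain.txt' is not,
                # tostandins([b'big.bin', b'plain.txt']) returns
                # [b'.hglf/big.bin', b'plain.txt'].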
                m = copy.copy(match)
                m._files = tostandins(m._files)

                result = orig(
                    node1, node2, m, ignored, clean, unknown, listsubrepos
                )
                if working:

                    def sfindirstate(f):
                        sf = lfutil.standin(f)
                        dirstate = self.dirstate
                        return sf in dirstate or dirstate.hasdir(sf)

                    match._files = [f for f in match._files if sfindirstate(f)]
                    # Don't waste time getting the ignored and unknown
                    # files from lfdirstate
                    unsure, s, mtime_boundary = lfdirstate.status(
                        match,
                        subrepos=[],
                        ignored=False,
                        clean=listclean,
                        unknown=False,
                    )
                    (modified, added, removed, deleted, clean) = (
                        s.modified,
                        s.added,
                        s.removed,
                        s.deleted,
                        s.clean,
                    )
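                    # 'unsure' holds largefiles whose cached size/mtime
                    # could not prove them clean; their content is hashed
                    # below before they are classified.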
                    if parentworking:
                        wctx = repo[None]
                        for lfile in unsure:
                            standin = lfutil.standin(lfile)
                            if standin not in ctx1:
                                # from second parent
                                modified.append(lfile)
                            elif lfutil.readasstandin(
                                ctx1[standin]
                            ) != lfutil.hashfile(self.wjoin(lfile)):
                                modified.append(lfile)
                            else:
                                if listclean:
                                    clean.append(lfile)
                                s = wctx[lfile].lstat()
                                mode = s.st_mode
                                size = s.st_size
                                mtime = timestamp.reliable_mtime_of(
                                    s, mtime_boundary
                                )
                                if mtime is not None:
                                    cache_data = (mode, size, mtime)
                                    lfdirstate.set_clean(lfile, cache_data)
                    else:
                        tocheck = unsure + modified + added + clean
                        modified, added, clean = [], [], []
                        checkexec = self.dirstate._checkexec

                        for lfile in tocheck:
                            standin = lfutil.standin(lfile)
                            if standin in ctx1:
                                abslfile = self.wjoin(lfile)
                                if (
                                    lfutil.readasstandin(ctx1[standin])
                                    != lfutil.hashfile(abslfile)
                                ) or (
                                    checkexec
                                    and (b'x' in ctx1.flags(standin))
                                    != bool(lfutil.getexecutable(abslfile))
                                ):
                                    modified.append(lfile)
                                elif listclean:
                                    clean.append(lfile)
                            else:
                                added.append(lfile)

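                    # Both branches above decide by content hash:
                    # lfutil.readasstandin() reads the hash recorded in
                    # the standin at ctx1, lfutil.hashfile() rehashes the
                    # working copy, and a largefile counts as modified
                    # only if they differ (or, where the filesystem can
                    # express it, the exec bit changed).
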
                    # At this point, 'removed' contains largefiles
                    # marked as 'R' in the working context. Largefiles
                    # that are not also managed in the target context
                    # must then be excluded from 'removed'.
                    removed = [
                        lfile
                        for lfile in removed
                        if lfutil.standin(lfile) in ctx1
                    ]

                    # Standins no longer found in lfdirstate have been deleted
                    for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            deleted.append(lfile)
                            # Sync "largefile has been removed" back to the
                            # standin. Removing a file as a side effect of
                            # running status is gross, but the alternatives (if
                            # any) are worse.
                            self.wvfs.unlinkpath(standin, ignoremissing=True)

                    # Filter result lists
                    result = list(result)
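                    # result is in scmutil.status order: 0=modified,
                    # 1=added, 2=removed, 3=deleted, 4=unknown,
                    # 5=ignored, 6=clean.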

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed if f not in self.dirstate]
                    result[2] = [f for f in result[2] if f not in lfdirstate]

                    lfiles = set(lfdirstate)
                    # Unknown files
                    result[4] = set(result[4]).difference(lfiles)
                    # Ignored files
                    result[5] = set(result[5]).difference(lfiles)
                    # combine normal files and largefiles
                    normals = [
                        [fn for fn in filelist if not lfutil.isstandin(fn)]
                        for filelist in result
                    ]
                    lfstatus = (
                        modified,
                        added,
                        removed,
                        deleted,
                        [],
                        [],
                        clean,
                    )
                    result = [
                        sorted(list1 + list2)
                        for (list1, list2) in zip(normals, lfstatus)
                    ]
                else:  # not against working directory
                    result = [
                        [lfutil.splitstandin(f) or f for f in items]
                        for items in result
                    ]

                if gotlock:
                    lfdirstate.write(self.currenttransaction())
                else:
                    lfdirstate.invalidate()

            self.lfstatus = True
            return scmutil.status(*result)

        def commitctx(self, ctx, *args, **kwargs):
            node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)

            class lfilesctx(ctx.__class__):
                def markcommitted(self, node):
                    orig = super(lfilesctx, self).markcommitted
                    return lfutil.markcommitted(orig, self, node)

            ctx.__class__ = lfilesctx
            return node

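        # Intent of the wrapping above (a sketch, not new behavior): once
        # the commit is finalized, lfutil.markcommitted runs so that the
        # largefile dirstate can be synchronized with the new revision.
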
        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(
            self,
            text=b"",
            user=None,
            date=None,
            match=None,
            force=False,
            editor=False,
            extra=None,
        ):
            if extra is None:
                extra = {}
            orig = super(lfilesrepo, self).commit

            with self.wlock():
                lfcommithook = self._lfcommithooks[-1]
                match = lfcommithook(self, match)
                result = orig(
                    text=text,
                    user=user,
                    date=date,
                    match=match,
                    force=force,
                    editor=editor,
                    extra=extra,
                )
                return result

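        # Example flow (sketch): with a modified largefile b'big.bin',
        # the default hook installed below (lfutil.updatestandinsbymatch)
        # first rewrites the standin b'.hglf/big.bin' with the new
        # content hash, and the commit then records the standin like any
        # other file.
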
        # TODO: _subdirlfs should be moved into "lfutil.py", because
        # it is referred only from "lfutil.updatestandinsbymatch"
        def _subdirlfs(self, files, lfiles):
            """
            Adjust matched file list
            If we pass a directory to commit whose only committable files
            are largefiles, the core commit code aborts before finding
            the largefiles.
            So we do the following:
            For directories that only have largefiles as matches,
            we explicitly add the largefiles to the match list and remove
            the directory.
            In other cases, we leave the match list unmodified.
            """
            actualfiles = []
            dirs = []
            regulars = []

            for f in files:
                if lfutil.isstandin(f + b'/'):
                    raise error.Abort(
                        _(b'file "%s" is a largefile standin') % f,
                        hint=b'commit the largefile itself instead',
                    )
                # Scan directories
                if self.wvfs.isdir(f):
                    dirs.append(f)
                else:
                    regulars.append(f)

            for f in dirs:
                matcheddir = False
                d = self.dirstate.normalize(f) + b'/'
                # Check for matched normal files
                for mf in regulars:
                    if self.dirstate.normalize(mf).startswith(d):
                        actualfiles.append(f)
                        matcheddir = True
                        break
                if not matcheddir:
                    # If no normal match, manually append
                    # any matching largefiles
                    for lf in lfiles:
                        if self.dirstate.normalize(lf).startswith(d):
                            actualfiles.append(lf)
                            if not matcheddir:
                                # There may still be normal files in the
                                # dir, so add a directory to the list,
                                # which forces status/dirstate to walk all
                                # files and call the match function on the
                                # matcher, even on case sensitive
                                # filesystems.
                                actualfiles.append(b'.')
                                matcheddir = True
                # Nothing in dir, so readd it
                # and let commit reject it
                if not matcheddir:
                    actualfiles.append(f)

            # Always add normal files
            actualfiles += regulars
            return actualfiles
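
        # Illustration (hypothetical layout): committing b'photos' when
        # photos/ holds only the largefile b'photos/raw.bin' turns the
        # match list [b'photos'] into [b'photos/raw.bin', b'.'], so the
        # core commit code sees concrete files instead of aborting.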

    repo.__class__ = lfilesrepo

    # stack of hooks being executed before committing.
    # only last element ("_lfcommithooks[-1]") is used for each committing.
    repo._lfcommithooks = [lfutil.updatestandinsbymatch]

    # Stack of status writer functions taking "*msg, **opts" arguments
    # like "ui.status()". Only last element ("_lfstatuswriters[-1]")
    # is used to write status out.
    repo._lfstatuswriters = [ui.status]

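    # The hook below uploads, before any changesets leave the repository,
    # every largefile blob referenced by the outgoing revisions, so the
    # remote never receives a standin whose content it cannot fetch.
    # pushop.lfrevs carries an explicit revision list when one was
    # requested; otherwise all outgoing revisions are scanned.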
    def prepushoutgoinghook(pushop):
        """Push largefiles for pushop before pushing revisions."""
        lfrevs = pushop.lfrevs
        if lfrevs is None:
            lfrevs = pushop.outgoing.missing
        if lfrevs:
            toupload = set()
            addfunc = lambda fn, lfhash: toupload.add(lfhash)
            lfutil.getlfilestoupload(pushop.repo, lfrevs, addfunc)
            lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload)

    repo.prepushoutgoinghooks.add(b"largefiles", prepushoutgoinghook)
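    # Note on the scan below: instead of string-matching the b'.hglf/'
    # prefix against entry.unencoded_path (the previous approach), the
    # hook now uses the StoreEntry API, considering only revlog entries
    # and matching the marker against entry.target_id.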
    def checkrequireslfiles(ui, repo, **kwargs):
        with repo.lock():
            if b'largefiles' in repo.requirements:
                return
            marker = lfutil.shortnameslash
            for entry in repo.store.datafiles():
                # XXX note that this match is not rooted and can wrongly
                # match a directory name ending with ".hglf"
                if entry.is_revlog and marker in entry.target_id:
                    repo.requirements.add(b'largefiles')
                    scmutil.writereporequirements(repo)
                    break

    ui.setconfig(
        b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles'
    )
    ui.setconfig(b'hooks', b'commit.lfiles', checkrequireslfiles, b'largefiles')