lfutil: provide a hint if the largefiles/lfs cache path cannot be determined...
Matt Harbison
r44863:ca82929e default
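
The patch below teaches _usercachedir() to attach a platform-specific hint
when it has to abort because no cache location can be determined. For quick
orientation, here is a minimal standalone sketch of the lookup order the
patched function follows; usercachedir_sketch, its "configured" parameter,
and the use of RuntimeError in place of Mercurial's error.Abort are
illustrative stand-ins, not Mercurial APIs:

    import os
    import sys


    def usercachedir_sketch(name='largefiles', configured=None):
        """Mirror the lookup order of _usercachedir() after this patch.

        `configured` stands in for ui.configpath(name, 'usercache').
        """
        if configured:  # an explicit <name>.usercache setting always wins
            return configured
        hint = None
        if sys.platform == 'win32':
            appdata = os.environ.get('LOCALAPPDATA', os.environ.get('APPDATA'))
            if appdata:
                return os.path.join(appdata, name)
            hint = 'define LOCALAPPDATA or APPDATA, or set %s.usercache' % name
        elif sys.platform == 'darwin':
            home = os.environ.get('HOME')
            if home:
                return os.path.join(home, 'Library', 'Caches', name)
            hint = 'define HOME, or set %s.usercache' % name
        else:  # other POSIX platforms
            cache = os.environ.get('XDG_CACHE_HOME')
            if cache:
                return os.path.join(cache, name)
            home = os.environ.get('HOME')
            if home:
                return os.path.join(home, '.cache', name)
            hint = 'define XDG_CACHE_HOME or HOME, or set %s.usercache' % name
        # New in this revision: the hint travels with the abort message.
        raise RuntimeError('unknown %s usercache location (%s)' % (name, hint))
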
@@ -1,760 +1,781 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''largefiles utility code: must not import other modules in this package.'''
 from __future__ import absolute_import
 
 import contextlib
 import copy
 import os
 import stat
 
 from mercurial.i18n import _
 from mercurial.node import hex
 from mercurial.pycompat import open
 
 from mercurial import (
     dirstate,
     encoding,
     error,
     httpconnection,
     match as matchmod,
     node,
     pycompat,
     scmutil,
     sparse,
     util,
     vfs as vfsmod,
 )
 from mercurial.utils import hashutil
 
 shortname = b'.hglf'
 shortnameslash = shortname + b'/'
 longname = b'largefiles'
 
 # -- Private worker functions ------------------------------------------
 
 
 @contextlib.contextmanager
 def lfstatus(repo, value=True):
     oldvalue = getattr(repo, 'lfstatus', False)
     repo.lfstatus = value
     try:
         yield
     finally:
         repo.lfstatus = oldvalue
 
 
 def getminsize(ui, assumelfiles, opt, default=10):
     lfsize = opt
     if not lfsize and assumelfiles:
         lfsize = ui.config(longname, b'minsize', default=default)
     if lfsize:
         try:
             lfsize = float(lfsize)
         except ValueError:
             raise error.Abort(
                 _(b'largefiles: size must be number (not %s)\n') % lfsize
             )
     if lfsize is None:
         raise error.Abort(_(b'minimum size for largefiles must be specified'))
     return lfsize
 
 
 def link(src, dest):
     """Try to create hardlink - if that fails, efficiently make a copy."""
     util.makedirs(os.path.dirname(dest))
     try:
         util.oslink(src, dest)
     except OSError:
         # if hardlinks fail, fall back to an atomic copy
         with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
             for chunk in util.filechunkiter(srcf):
                 dstf.write(chunk)
         os.chmod(dest, os.stat(src).st_mode)
 
 
 def usercachepath(ui, hash):
     '''Return the correct location in the "global" largefiles cache for a file
     with the given hash.
     This cache is used for sharing of largefiles across repositories - both
     to preserve download bandwidth and storage space.'''
     return os.path.join(_usercachedir(ui), hash)
 
 
 def _usercachedir(ui, name=longname):
     '''Return the location of the "global" largefiles cache.'''
     path = ui.configpath(name, b'usercache')
     if path:
         return path
+
+    hint = None
+
     if pycompat.iswindows:
         appdata = encoding.environ.get(
             b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
         )
         if appdata:
             return os.path.join(appdata, name)
+
+        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
+            b"LOCALAPPDATA",
+            b"APPDATA",
+            name,
+        )
     elif pycompat.isdarwin:
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'Library', b'Caches', name)
+
+        hint = _(b"define %s in the environment, or set %s.usercache") % (
+            b"HOME",
+            name,
+        )
     elif pycompat.isposix:
         path = encoding.environ.get(b'XDG_CACHE_HOME')
         if path:
             return os.path.join(path, name)
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'.cache', name)
+
+        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
+            b"XDG_CACHE_HOME",
+            b"HOME",
+            name,
+        )
     else:
         raise error.Abort(
             _(b'unknown operating system: %s\n') % pycompat.osname
         )
-    raise error.Abort(_(b'unknown %s usercache location') % name)
+
+    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
 
 
 def inusercache(ui, hash):
     path = usercachepath(ui, hash)
     return os.path.exists(path)
 
 
 def findfile(repo, hash):
     '''Return store path of the largefile with the specified hash.
     As a side effect, the file might be linked from user cache.
     Return None if the file can't be found locally.'''
     path, exists = findstorepath(repo, hash)
     if exists:
         repo.ui.note(_(b'found %s in store\n') % hash)
         return path
     elif inusercache(repo.ui, hash):
         repo.ui.note(_(b'found %s in system cache\n') % hash)
         path = storepath(repo, hash)
         link(usercachepath(repo.ui, hash), path)
         return path
     return None
 
 
 class largefilesdirstate(dirstate.dirstate):
     def __getitem__(self, key):
         return super(largefilesdirstate, self).__getitem__(unixpath(key))
 
     def normal(self, f):
         return super(largefilesdirstate, self).normal(unixpath(f))
 
     def remove(self, f):
         return super(largefilesdirstate, self).remove(unixpath(f))
 
     def add(self, f):
         return super(largefilesdirstate, self).add(unixpath(f))
 
     def drop(self, f):
         return super(largefilesdirstate, self).drop(unixpath(f))
 
     def forget(self, f):
         return super(largefilesdirstate, self).forget(unixpath(f))
 
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
 
     def _ignore(self, f):
         return False
 
     def write(self, tr=False):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
         super(largefilesdirstate, self).write(None)
 
 
 def openlfdirstate(ui, repo, create=True):
     '''
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
     '''
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
     lfdirstate = largefilesdirstate(
         opener,
         ui,
         repo.root,
         repo.dirstate._validate,
         lambda: sparse.matcher(repo),
     )
 
     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
     if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
         matcher = getstandinmatcher(repo)
         standins = repo.dirstate.walk(
             matcher, subrepos=[], unknown=False, ignored=False
         )
 
         if len(standins) > 0:
             vfs.makedirs(lfstoredir)
 
         for standin in standins:
             lfile = splitstandin(standin)
             lfdirstate.normallookup(lfile)
     return lfdirstate
 
 
 def lfdirstatestatus(lfdirstate, repo):
     pctx = repo[b'.']
     match = matchmod.always()
     unsure, s = lfdirstate.status(
         match, subrepos=[], ignored=False, clean=False, unknown=False
     )
     modified, clean = s.modified, s.clean
     for lfile in unsure:
         try:
             fctx = pctx[standin(lfile)]
         except LookupError:
             fctx = None
         if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
             modified.append(lfile)
         else:
             clean.append(lfile)
             lfdirstate.normal(lfile)
     return s
 
 
 def listlfiles(repo, rev=None, matcher=None):
     '''return a list of largefiles in the working copy or the
     specified changeset'''
 
     if matcher is None:
         matcher = getstandinmatcher(repo)
 
     # ignore unknown files in working directory
     return [
         splitstandin(f)
         for f in repo[rev].walk(matcher)
         if rev is not None or repo.dirstate[f] != b'?'
     ]
 
 
 def instore(repo, hash, forcelocal=False):
     '''Return true if a largefile with the given hash exists in the store'''
     return os.path.exists(storepath(repo, hash, forcelocal))
 
 
 def storepath(repo, hash, forcelocal=False):
     '''Return the correct location in the repository largefiles store for a
     file with the given hash.'''
     if not forcelocal and repo.shared():
         return repo.vfs.reljoin(repo.sharedpath, longname, hash)
     return repo.vfs.join(longname, hash)
 
 
 def findstorepath(repo, hash):
     '''Search through the local store path(s) to find the file for the given
     hash. If the file is not found, its path in the primary store is returned.
     The return value is a tuple of (path, exists(path)).
     '''
     # For shared repos, the primary store is in the share source. But for
     # backward compatibility, force a lookup in the local store if it wasn't
     # found in the share source.
     path = storepath(repo, hash, False)
 
     if instore(repo, hash):
         return (path, True)
     elif repo.shared() and instore(repo, hash, True):
         return storepath(repo, hash, True), True
 
     return (path, False)
 
 
 def copyfromcache(repo, hash, filename):
     '''Copy the specified largefile from the repo or system cache to
     filename in the repository. Return true on success or false if the
     file was not found in either cache (which should not happen:
     this is meant to be called only after ensuring that the needed
     largefile exists in the cache).'''
     wvfs = repo.wvfs
     path = findfile(repo, hash)
     if path is None:
         return False
     wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
     # The write may fail before the file is fully written, but we
     # don't use atomic writes in the working copy.
     with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
         gothash = copyandhash(util.filechunkiter(srcfd), destfd)
     if gothash != hash:
         repo.ui.warn(
             _(b'%s: data corruption in %s with hash %s\n')
             % (filename, path, gothash)
         )
         wvfs.unlink(filename)
         return False
     return True
 
 
 def copytostore(repo, ctx, file, fstandin):
     wvfs = repo.wvfs
     hash = readasstandin(ctx[fstandin])
     if instore(repo, hash):
         return
     if wvfs.exists(file):
         copytostoreabsolute(repo, wvfs.join(file), hash)
     else:
         repo.ui.warn(
             _(b"%s: largefile %s not available from local store\n")
             % (file, hash)
         )
 
 
 def copyalltostore(repo, node):
     '''Copy all largefiles in a given revision to the store'''
 
     ctx = repo[node]
     for filename in ctx.files():
         realfile = splitstandin(filename)
         if realfile is not None and filename in ctx.manifest():
             copytostore(repo, ctx, realfile, filename)
 
 
 def copytostoreabsolute(repo, file, hash):
     if inusercache(repo.ui, hash):
         link(usercachepath(repo.ui, hash), storepath(repo, hash))
     else:
         util.makedirs(os.path.dirname(storepath(repo, hash)))
         with open(file, b'rb') as srcf:
             with util.atomictempfile(
                 storepath(repo, hash), createmode=repo.store.createmode
             ) as dstf:
                 for chunk in util.filechunkiter(srcf):
                     dstf.write(chunk)
         linktousercache(repo, hash)
 
 
 def linktousercache(repo, hash):
     '''Link / copy the largefile with the specified hash from the store
     to the cache.'''
     path = usercachepath(repo.ui, hash)
     link(storepath(repo, hash), path)
 
 
 def getstandinmatcher(repo, rmatcher=None):
     '''Return a match object that applies rmatcher to the standin directory'''
     wvfs = repo.wvfs
     standindir = shortname
 
     # no warnings about missing files or directories
     badfn = lambda f, msg: None
 
     if rmatcher and not rmatcher.always():
         pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
         if not pats:
             pats = [wvfs.join(standindir)]
         match = scmutil.match(repo[None], pats, badfn=badfn)
     else:
         # no patterns: relative to repo root
         match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
     return match
 
 
 def composestandinmatcher(repo, rmatcher):
     '''Return a matcher that accepts standins corresponding to the
     files accepted by rmatcher. Pass the list of files in the matcher
     as the paths specified by the user.'''
     smatcher = getstandinmatcher(repo, rmatcher)
     isstandin = smatcher.matchfn
 
     def composedmatchfn(f):
         return isstandin(f) and rmatcher.matchfn(splitstandin(f))
 
     smatcher.matchfn = composedmatchfn
 
     return smatcher
 
 
 def standin(filename):
     '''Return the repo-relative path to the standin for the specified big
     file.'''
     # Notes:
     # 1) Some callers want an absolute path, but for instance addlargefiles
     #    needs it repo-relative so it can be passed to repo[None].add(). So
     #    leave it up to the caller to use repo.wjoin() to get an absolute path.
     # 2) Join with '/' because that's what dirstate always uses, even on
     #    Windows. Change existing separator to '/' first in case we are
     #    passed filenames from an external source (like the command line).
     return shortnameslash + util.pconvert(filename)
 
 
 def isstandin(filename):
     '''Return true if filename is a big file standin. filename must be
     in Mercurial's internal form (slash-separated).'''
     return filename.startswith(shortnameslash)
 
 
 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
     # Change local separator to / first just in case we are passed filenames
     # from an external source (like the command line).
     bits = util.pconvert(filename).split(b'/', 1)
     if len(bits) == 2 and bits[0] == shortname:
         return bits[1]
     else:
         return None
 
 
 def updatestandin(repo, lfile, standin):
     """Re-calculate hash value of lfile and write it into standin
 
     This assumes that "lfutil.standin(lfile) == standin", for efficiency.
     """
     file = repo.wjoin(lfile)
     if repo.wvfs.exists(lfile):
         hash = hashfile(file)
         executable = getexecutable(file)
         writestandin(repo, standin, hash, executable)
     else:
         raise error.Abort(_(b'%s: file not found!') % lfile)
 
 
 def readasstandin(fctx):
     '''read hex hash from given filectx of standin file
 
     This encapsulates how "standin" data is stored into storage layer.'''
     return fctx.data().strip()
 
 
 def writestandin(repo, standin, hash, executable):
     '''write hash to <repo.root>/<standin>'''
     repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
 
 
 def copyandhash(instream, outfile):
     '''Read bytes from instream (iterable) and write them to outfile,
     computing the SHA-1 hash of the data along the way. Return the hash.'''
     hasher = hashutil.sha1(b'')
     for data in instream:
         hasher.update(data)
         outfile.write(data)
     return hex(hasher.digest())
 
 
 def hashfile(file):
     if not os.path.exists(file):
         return b''
     with open(file, b'rb') as fd:
         return hexsha1(fd)
 
 
 def getexecutable(filename):
     mode = os.stat(filename).st_mode
     return (
         (mode & stat.S_IXUSR)
         and (mode & stat.S_IXGRP)
         and (mode & stat.S_IXOTH)
     )
 
 
 def urljoin(first, second, *arg):
     def join(left, right):
         if not left.endswith(b'/'):
             left += b'/'
         if right.startswith(b'/'):
             right = right[1:]
         return left + right
 
     url = join(first, second)
     for a in arg:
         url = join(url, a)
     return url
 
 
 def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object data"""
     h = hashutil.sha1()
     for chunk in util.filechunkiter(fileobj):
         h.update(chunk)
     return hex(h.digest())
 
 
 def httpsendfile(ui, filename):
     return httpconnection.httpsendfile(ui, filename, b'rb')
 
 
 def unixpath(path):
     '''Return a version of path normalized for use with the lfdirstate.'''
     return util.pconvert(os.path.normpath(path))
 
 
 def islfilesrepo(repo):
     '''Return true if the repo is a largefile repo.'''
     if b'largefiles' in repo.requirements and any(
         shortnameslash in f[0] for f in repo.store.datafiles()
     ):
         return True
 
     return any(openlfdirstate(repo.ui, repo, False))
 
 
 class storeprotonotcapable(Exception):
     def __init__(self, storetypes):
         self.storetypes = storetypes
 
 
 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
     wctx = repo[None]
     for standin in repo.dirstate.walk(
         matcher, subrepos=[], unknown=False, ignored=False
     ):
         lfile = splitstandin(standin)
         try:
             hash = readasstandin(wctx[standin])
         except IOError:
             hash = None
         standins.append((lfile, hash))
     return standins
 
 
 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
     lfstandin = standin(lfile)
     if lfstandin in repo.dirstate:
         stat = repo.dirstate._map[lfstandin]
         state, mtime = stat[0], stat[3]
     else:
         state, mtime = b'?', -1
     if state == b'n':
         if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
             # state 'n' doesn't ensure 'clean' in this case
             lfdirstate.normallookup(lfile)
         else:
             lfdirstate.normal(lfile)
     elif state == b'm':
         lfdirstate.normallookup(lfile)
     elif state == b'r':
         lfdirstate.remove(lfile)
     elif state == b'a':
         lfdirstate.add(lfile)
     elif state == b'?':
         lfdirstate.drop(lfile)
 
 
 def markcommitted(orig, ctx, node):
     repo = ctx.repo()
 
     orig(node)
 
     # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
     # because files coming from the 2nd parent are omitted in the latter.
     #
     # The former should be used to get targets of "synclfdirstate",
     # because such files:
     # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
     # - have to be marked as "n" after commit, but
     # - aren't listed in "repo[node].files()"
 
     lfdirstate = openlfdirstate(repo.ui, repo)
     for f in ctx.files():
         lfile = splitstandin(f)
         if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
     lfdirstate.write()
 
     # As part of committing, copy all of the largefiles into the cache.
     #
     # Using "node" instead of "ctx" implies an additional "repo[node]"
     # lookup while copyalltostore() runs, but it can omit the redundant
     # check for files coming from the 2nd parent, which should already
     # exist in the store at merge time.
     copyalltostore(repo, node)
 
 
 def getlfilestoupdate(oldstandins, newstandins):
     changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
     filelist = []
     for f in changedstandins:
         if f[0] not in filelist:
             filelist.append(f[0])
     return filelist
 
 
 def getlfilestoupload(repo, missing, addfunc):
     makeprogress = repo.ui.makeprogress
     with makeprogress(
         _(b'finding outgoing largefiles'),
         unit=_(b'revisions'),
         total=len(missing),
     ) as progress:
         for i, n in enumerate(missing):
             progress.update(i)
             parents = [p for p in repo[n].parents() if p != node.nullid]
 
             with lfstatus(repo, value=False):
                 ctx = repo[n]
 
             files = set(ctx.files())
             if len(parents) == 2:
                 mc = ctx.manifest()
                 mp1 = ctx.p1().manifest()
                 mp2 = ctx.p2().manifest()
                 for f in mp1:
                     if f not in mc:
                         files.add(f)
                 for f in mp2:
                     if f not in mc:
                         files.add(f)
                 for f in mc:
                     if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                         files.add(f)
             for fn in files:
                 if isstandin(fn) and fn in ctx:
                     addfunc(fn, readasstandin(ctx[fn]))
 
 
 def updatestandinsbymatch(repo, match):
     '''Update standins in the working directory according to specified match
 
     This returns (possibly modified) ``match`` object to be used for
     subsequent commit process.
     '''
 
     ui = repo.ui
 
     # Case 1: user calls commit with no specific files or
     # include/exclude patterns: refresh and commit all files that
     # are "dirty".
     if match is None or match.always():
         # Spend a bit of time here to get a list of files we know
         # are modified so we can compare only against those.
         # It can cost a lot of time (several seconds)
         # otherwise to update all standins if the largefiles are
         # large.
         lfdirstate = openlfdirstate(ui, repo)
         dirtymatch = matchmod.always()
         unsure, s = lfdirstate.status(
             dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
         )
         modifiedfiles = unsure + s.modified + s.added + s.removed
         lfiles = listlfiles(repo)
         # this only loops through largefiles that exist (not
         # removed/renamed)
         for lfile in lfiles:
             if lfile in modifiedfiles:
                 fstandin = standin(lfile)
                 if repo.wvfs.exists(fstandin):
                     # this handles the case where a rebase is being
                     # performed and the working copy is not updated
                     # yet.
                     if repo.wvfs.exists(lfile):
                         updatestandin(repo, lfile, fstandin)
 
         return match
 
     lfiles = listlfiles(repo)
     match._files = repo._subdirlfs(match.files(), lfiles)
 
     # Case 2: user calls commit with specified patterns: refresh
     # any matching big files.
     smatcher = composestandinmatcher(repo, match)
     standins = repo.dirstate.walk(
         smatcher, subrepos=[], unknown=False, ignored=False
     )
 
     # No matching big files: get out of the way and pass control to
     # the usual commit() method.
     if not standins:
         return match
 
     # Refresh all matching big files. It's possible that the
     # commit will end up failing, in which case the big files will
     # stay refreshed. No harm done: the user modified them and
     # asked to commit them, so sooner or later we're going to
     # refresh the standins. Might as well leave them refreshed.
     lfdirstate = openlfdirstate(ui, repo)
     for fstandin in standins:
         lfile = splitstandin(fstandin)
         if lfdirstate[lfile] != b'r':
             updatestandin(repo, lfile, fstandin)
 
     # Cook up a new matcher that only matches regular files or
     # standins corresponding to the big files requested by the
     # user. Have to modify _files to prevent commit() from
     # complaining "not tracked" for big files.
     match = copy.copy(match)
     origmatchfn = match.matchfn
 
     # Check both the list of largefiles and the list of
     # standins because if a largefile was removed, it
     # won't be in the list of largefiles at this point
     match._files += sorted(standins)
 
     actualfiles = []
     for f in match._files:
         fstandin = standin(f)
 
         # For largefiles, only one of the normal and standin should be
         # committed (except if one of them is a remove). In the case of a
         # standin removal, drop the normal file if it is unknown to dirstate.
         # Thus, skip plain largefile names but keep the standin.
         if f in lfiles or fstandin in standins:
             if repo.dirstate[fstandin] != b'r':
                 if repo.dirstate[f] != b'r':
                     continue
             elif repo.dirstate[f] == b'?':
                 continue
 
         actualfiles.append(f)
     match._files = actualfiles
 
     def matchfn(f):
         if origmatchfn(f):
             return f not in lfiles
         else:
             return f in standins
 
     match.matchfn = matchfn
 
     return match
 
 
 class automatedcommithook(object):
     '''Stateful hook to update standins at the 1st commit of resuming
 
     For efficiency, updating standins in the working directory should
     be avoided while automated committing (like rebase, transplant and
     so on), because they should be updated before committing.
 
     But the 1st commit of resuming automated committing (e.g. ``rebase
     --continue``) should update them, because largefiles may be
     modified manually.
     '''
 
     def __init__(self, resuming):
         self.resuming = resuming
 
     def __call__(self, repo, match):
         if self.resuming:
             self.resuming = False  # avoids updating at subsequent commits
             return updatestandinsbymatch(repo, match)
         else:
             return match
 
 
 def getstatuswriter(ui, repo, forcibly=None):
     '''Return the function to write largefiles specific status out
 
     If ``forcibly`` is ``None``, this returns the last element of
     ``repo._lfstatuswriters`` as "default" writer function.
 
     Otherwise, this returns the function to always write out (or
     ignore if ``not forcibly``) status.
     '''
     if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
         return repo._lfstatuswriters[-1]
     else:
         if forcibly:
             return ui.status  # forcibly WRITE OUT
         else:
             return lambda *msg, **opts: None  # forcibly IGNORE
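
The companion test hunk below exercises the new hints. Mercurial prints an
abort's hint as a parenthesized line under the "abort:" message, which is
exactly the shape the test asserts. A minimal sketch of that rendering,
where render_abort is a hypothetical helper used only to make the expected
output concrete:

    def render_abort(message, hint=None):
        # Shape asserted by the test below, e.g.:
        #   abort: unknown lfs usercache location
        #   (define XDG_CACHE_HOME or HOME in the environment, ...)
        lines = ['abort: %s' % message]
        if hint:
            lines.append('(%s)' % hint)
        return '\n'.join(lines)


    print(render_abort('unknown lfs usercache location',
                       'define XDG_CACHE_HOME or HOME in the environment, '
                       'or set lfs.usercache'))
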
@@ -1,97 +1,126 @@
 In this test, we want to test LFS bundle application on both LFS and non-LFS
 repos.
 
 To make it more interesting, the file revisions will contain hg filelog
 metadata ('\1\n'). The bundle will have 1 file revision overlapping with the
 destination repo.
 
 # rev      1           2      3
 # repo:    yes         yes    no
 # bundle:  no (base)   yes    yes (deltabase: 2 if possible)
 
 It is interesting because rev 2 could have been stored as LFS in the repo, and
 non-LFS in the bundle; or vice-versa.
 
 Init
 
   $ cat >> $HGRCPATH << EOF
   > [extensions]
   > lfs=
   > drawdag=$TESTDIR/drawdag.py
   > [lfs]
   > url=file:$TESTTMP/lfs-remote
   > EOF
 
 Helper functions
 
   $ commitxy() {
   > hg debugdrawdag "$@" <<'EOS'
   > Y # Y/X=\1\nAAAA\nE\nF
   > | # Y/Y=\1\nAAAA\nG\nH
   > X # X/X=\1\nAAAA\nC\n
   >   # X/Y=\1\nAAAA\nD\n
   > EOS
   > }
 
   $ commitz() {
   > hg debugdrawdag "$@" <<'EOS'
   > Z # Z/X=\1\nAAAA\nI\n
   > | # Z/Y=\1\nAAAA\nJ\n
   > | # Z/Z=\1\nZ
   > Y
   > EOS
   > }
 
   $ enablelfs() {
   >   cat >> .hg/hgrc <<EOF
   > [lfs]
   > track=all()
   > EOF
   > }
 
 Generate bundles
 
   $ for i in normal lfs; do
   >   NAME=src-$i
   >   hg init $TESTTMP/$NAME
   >   cd $TESTTMP/$NAME
   >   [ $i = lfs ] && enablelfs
   >   commitxy
   >   commitz
   >   hg bundle -q --base X -r Y+Z $TESTTMP/$NAME.bundle
   >   SRCNAMES="$SRCNAMES $NAME"
   > done
 
 Prepare destination repos
 
   $ for i in normal lfs; do
   >   NAME=dst-$i
   >   hg init $TESTTMP/$NAME
   >   cd $TESTTMP/$NAME
   >   [ $i = lfs ] && enablelfs
   >   commitxy
   >   DSTNAMES="$DSTNAMES $NAME"
   > done
 
 Apply bundles
 
   $ for i in $SRCNAMES; do
   >   for j in $DSTNAMES; do
   >     echo ---- Applying $i.bundle to $j ----
   >     cp -R $TESTTMP/$j $TESTTMP/tmp-$i-$j
   >     cd $TESTTMP/tmp-$i-$j
   >     if hg unbundle $TESTTMP/$i.bundle -q 2>/dev/null; then
   >       hg verify -q && echo OK
   >     else
   >       echo CRASHED
   >     fi
   >   done
   > done
   ---- Applying src-normal.bundle to dst-normal ----
   OK
   ---- Applying src-normal.bundle to dst-lfs ----
   OK
   ---- Applying src-lfs.bundle to dst-normal ----
   OK
   ---- Applying src-lfs.bundle to dst-lfs ----
   OK
+
+Hint if the cache location cannot be inferred from the environment
+
+#if windows
+  $ unset LOCALAPPDATA
+  $ unset APPDATA
+  $ HGRCPATH= hg config lfs --debug
+  abort: unknown lfs usercache location
+  (define LOCALAPPDATA or APPDATA in the environment, or set lfs.usercache)
+  [255]
+#endif
+
+#if osx
+  $ unset HOME
+  $ HGRCPATH= hg config lfs --debug
+  abort: unknown lfs usercache location
+  (define HOME in the environment, or set lfs.usercache)
+  [255]
+#endif
+
+#if no-windows no-osx
+  $ unset XDG_CACHE_HOME
+  $ unset HOME
+  $ HGRCPATH= hg config lfs --debug
+  abort: unknown lfs usercache location
+  (define XDG_CACHE_HOME or HOME in the environment, or set lfs.usercache)
+  [255]
+#endif
+
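
Aside: the cache paths resolved above are populated through lfutil.link(),
which prefers a hardlink into the user cache and falls back to an atomic
copy. A rough standalone sketch of that pattern using only the Python
standard library; link_or_copy is an illustrative name, not a Mercurial API,
and the error handling is simplified:

    import os
    import shutil
    import tempfile


    def link_or_copy(src, dest):
        """Hardlink dest to src; on failure, copy via a temp file plus
        rename so readers never observe a half-written cache entry."""
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        try:
            os.link(src, dest)  # cheap: shares storage with the store copy
            return
        except OSError:
            pass  # e.g. cross-device link, or a filesystem without hardlinks
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dest))
        try:
            with os.fdopen(fd, 'wb') as dstf, open(src, 'rb') as srcf:
                shutil.copyfileobj(srcf, dstf)
            shutil.copymode(src, tmp)
            os.replace(tmp, dest)  # atomic rename into place
        except BaseException:
            os.unlink(tmp)
            raise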