largefiles: allow "lfstatus" context manager to set value to False...
Martin von Zweigbergk
r43983:a02e4c12 default
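
The change itself is small: lfstatus() gains a "value" parameter (defaulting to True, the old behavior), so the same context manager that sets repo.lfstatus can now also clear it. Below is a minimal, self-contained sketch of the resulting semantics; the FakeRepo class is a hypothetical stand-in for a real Mercurial repository object, and only lfstatus() itself mirrors the code in this commit:

    import contextlib


    @contextlib.contextmanager
    def lfstatus(repo, value=True):
        # Save the old flag, set the requested value, and restore the
        # old flag on exit, even if the block raises.
        oldvalue = getattr(repo, 'lfstatus', False)
        repo.lfstatus = value
        try:
            yield
        finally:
            repo.lfstatus = oldvalue


    class FakeRepo(object):  # hypothetical stand-in for a repo object
        lfstatus = False


    repo = FakeRepo()
    with lfstatus(repo):  # default value=True sets the flag
        assert repo.lfstatus is True
        with lfstatus(repo, value=False):  # new: the same manager clears it
            assert repo.lfstatus is False
        assert repo.lfstatus is True  # restored on exit
    assert repo.lfstatus is False

The second changed region in the diff below uses exactly this: the manual save/set/try/finally sequence in getlfilestoupload() collapses into a single "with lfstatus(repo, value=False):" block.
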
@@ -1,764 +1,760 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''largefiles utility code: must not import other modules in this package.'''
 from __future__ import absolute_import

 import contextlib
 import copy
 import hashlib
 import os
 import stat

 from mercurial.i18n import _
 from mercurial.node import hex
 from mercurial.pycompat import open

 from mercurial import (
     dirstate,
     encoding,
     error,
     httpconnection,
     match as matchmod,
     node,
     pycompat,
     scmutil,
     sparse,
     util,
     vfs as vfsmod,
 )

 shortname = b'.hglf'
 shortnameslash = shortname + b'/'
 longname = b'largefiles'

 # -- Private worker functions ------------------------------------------


 @contextlib.contextmanager
-def lfstatus(repo):
+def lfstatus(repo, value=True):
     oldvalue = getattr(repo, 'lfstatus', False)
-    repo.lfstatus = True
+    repo.lfstatus = value
     try:
         yield
     finally:
         repo.lfstatus = oldvalue


 def getminsize(ui, assumelfiles, opt, default=10):
     lfsize = opt
     if not lfsize and assumelfiles:
         lfsize = ui.config(longname, b'minsize', default=default)
     if lfsize:
         try:
             lfsize = float(lfsize)
         except ValueError:
             raise error.Abort(
                 _(b'largefiles: size must be number (not %s)\n') % lfsize
             )
     if lfsize is None:
         raise error.Abort(_(b'minimum size for largefiles must be specified'))
     return lfsize


 def link(src, dest):
     """Try to create hardlink - if that fails, efficiently make a copy."""
     util.makedirs(os.path.dirname(dest))
     try:
         util.oslink(src, dest)
     except OSError:
         # if hardlinks fail, fallback on atomic copy
         with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
             for chunk in util.filechunkiter(srcf):
                 dstf.write(chunk)
         os.chmod(dest, os.stat(src).st_mode)


 def usercachepath(ui, hash):
     '''Return the correct location in the "global" largefiles cache for a file
     with the given hash.
     This cache is used for sharing of largefiles across repositories - both
     to preserve download bandwidth and storage space.'''
     return os.path.join(_usercachedir(ui), hash)


 def _usercachedir(ui, name=longname):
     '''Return the location of the "global" largefiles cache.'''
     path = ui.configpath(name, b'usercache')
     if path:
         return path
     if pycompat.iswindows:
         appdata = encoding.environ.get(
             b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
         )
         if appdata:
             return os.path.join(appdata, name)
     elif pycompat.isdarwin:
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'Library', b'Caches', name)
     elif pycompat.isposix:
         path = encoding.environ.get(b'XDG_CACHE_HOME')
         if path:
             return os.path.join(path, name)
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'.cache', name)
     else:
         raise error.Abort(
             _(b'unknown operating system: %s\n') % pycompat.osname
         )
     raise error.Abort(_(b'unknown %s usercache location') % name)


 def inusercache(ui, hash):
     path = usercachepath(ui, hash)
     return os.path.exists(path)


 def findfile(repo, hash):
     '''Return store path of the largefile with the specified hash.
     As a side effect, the file might be linked from user cache.
     Return None if the file can't be found locally.'''
     path, exists = findstorepath(repo, hash)
     if exists:
         repo.ui.note(_(b'found %s in store\n') % hash)
         return path
     elif inusercache(repo.ui, hash):
         repo.ui.note(_(b'found %s in system cache\n') % hash)
         path = storepath(repo, hash)
         link(usercachepath(repo.ui, hash), path)
         return path
     return None


 class largefilesdirstate(dirstate.dirstate):
     def __getitem__(self, key):
         return super(largefilesdirstate, self).__getitem__(unixpath(key))

     def normal(self, f):
         return super(largefilesdirstate, self).normal(unixpath(f))

     def remove(self, f):
         return super(largefilesdirstate, self).remove(unixpath(f))

     def add(self, f):
         return super(largefilesdirstate, self).add(unixpath(f))

     def drop(self, f):
         return super(largefilesdirstate, self).drop(unixpath(f))

     def forget(self, f):
         return super(largefilesdirstate, self).forget(unixpath(f))

     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))

     def _ignore(self, f):
         return False

     def write(self, tr=False):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
         super(largefilesdirstate, self).write(None)


 def openlfdirstate(ui, repo, create=True):
     '''
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
     '''
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
     lfdirstate = largefilesdirstate(
         opener,
         ui,
         repo.root,
         repo.dirstate._validate,
         lambda: sparse.matcher(repo),
     )

     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
     if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
         matcher = getstandinmatcher(repo)
         standins = repo.dirstate.walk(
             matcher, subrepos=[], unknown=False, ignored=False
         )

         if len(standins) > 0:
             vfs.makedirs(lfstoredir)

         for standin in standins:
             lfile = splitstandin(standin)
             lfdirstate.normallookup(lfile)
     return lfdirstate


 def lfdirstatestatus(lfdirstate, repo):
     pctx = repo[b'.']
     match = matchmod.always()
     unsure, s = lfdirstate.status(
         match, subrepos=[], ignored=False, clean=False, unknown=False
     )
     modified, clean = s.modified, s.clean
     for lfile in unsure:
         try:
             fctx = pctx[standin(lfile)]
         except LookupError:
             fctx = None
         if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
             modified.append(lfile)
         else:
             clean.append(lfile)
             lfdirstate.normal(lfile)
     return s


 def listlfiles(repo, rev=None, matcher=None):
     '''return a list of largefiles in the working copy or the
     specified changeset'''

     if matcher is None:
         matcher = getstandinmatcher(repo)

     # ignore unknown files in working directory
     return [
         splitstandin(f)
         for f in repo[rev].walk(matcher)
         if rev is not None or repo.dirstate[f] != b'?'
     ]


 def instore(repo, hash, forcelocal=False):
     '''Return true if a largefile with the given hash exists in the store'''
     return os.path.exists(storepath(repo, hash, forcelocal))


 def storepath(repo, hash, forcelocal=False):
     '''Return the correct location in the repository largefiles store for a
     file with the given hash.'''
     if not forcelocal and repo.shared():
         return repo.vfs.reljoin(repo.sharedpath, longname, hash)
     return repo.vfs.join(longname, hash)


 def findstorepath(repo, hash):
     '''Search through the local store path(s) to find the file for the given
     hash. If the file is not found, its path in the primary store is returned.
     The return value is a tuple of (path, exists(path)).
     '''
     # For shared repos, the primary store is in the share source. But for
     # backward compatibility, force a lookup in the local store if it wasn't
     # found in the share source.
     path = storepath(repo, hash, False)

     if instore(repo, hash):
         return (path, True)
     elif repo.shared() and instore(repo, hash, True):
         return storepath(repo, hash, True), True

     return (path, False)


 def copyfromcache(repo, hash, filename):
     '''Copy the specified largefile from the repo or system cache to
     filename in the repository. Return true on success or false if the
     file was not found in either cache (which should not happen:
     this is meant to be called only after ensuring that the needed
     largefile exists in the cache).'''
     wvfs = repo.wvfs
     path = findfile(repo, hash)
     if path is None:
         return False
     wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
     # The write may fail before the file is fully written, but we
     # don't use atomic writes in the working copy.
     with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
         gothash = copyandhash(util.filechunkiter(srcfd), destfd)
     if gothash != hash:
         repo.ui.warn(
             _(b'%s: data corruption in %s with hash %s\n')
             % (filename, path, gothash)
         )
         wvfs.unlink(filename)
         return False
     return True


 def copytostore(repo, ctx, file, fstandin):
     wvfs = repo.wvfs
     hash = readasstandin(ctx[fstandin])
     if instore(repo, hash):
         return
     if wvfs.exists(file):
         copytostoreabsolute(repo, wvfs.join(file), hash)
     else:
         repo.ui.warn(
             _(b"%s: largefile %s not available from local store\n")
             % (file, hash)
         )


 def copyalltostore(repo, node):
     '''Copy all largefiles in a given revision to the store'''

     ctx = repo[node]
     for filename in ctx.files():
         realfile = splitstandin(filename)
         if realfile is not None and filename in ctx.manifest():
             copytostore(repo, ctx, realfile, filename)


 def copytostoreabsolute(repo, file, hash):
     if inusercache(repo.ui, hash):
         link(usercachepath(repo.ui, hash), storepath(repo, hash))
     else:
         util.makedirs(os.path.dirname(storepath(repo, hash)))
         with open(file, b'rb') as srcf:
             with util.atomictempfile(
                 storepath(repo, hash), createmode=repo.store.createmode
             ) as dstf:
                 for chunk in util.filechunkiter(srcf):
                     dstf.write(chunk)
         linktousercache(repo, hash)


 def linktousercache(repo, hash):
     '''Link / copy the largefile with the specified hash from the store
     to the cache.'''
     path = usercachepath(repo.ui, hash)
     link(storepath(repo, hash), path)


 def getstandinmatcher(repo, rmatcher=None):
     '''Return a match object that applies rmatcher to the standin directory'''
     wvfs = repo.wvfs
     standindir = shortname

     # no warnings about missing files or directories
     badfn = lambda f, msg: None

     if rmatcher and not rmatcher.always():
         pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
         if not pats:
             pats = [wvfs.join(standindir)]
         match = scmutil.match(repo[None], pats, badfn=badfn)
     else:
         # no patterns: relative to repo root
         match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
     return match


 def composestandinmatcher(repo, rmatcher):
     '''Return a matcher that accepts standins corresponding to the
     files accepted by rmatcher. Pass the list of files in the matcher
     as the paths specified by the user.'''
     smatcher = getstandinmatcher(repo, rmatcher)
     isstandin = smatcher.matchfn

     def composedmatchfn(f):
         return isstandin(f) and rmatcher.matchfn(splitstandin(f))

     smatcher.matchfn = composedmatchfn

     return smatcher


 def standin(filename):
     '''Return the repo-relative path to the standin for the specified big
     file.'''
     # Notes:
     # 1) Some callers want an absolute path, but for instance addlargefiles
     #    needs it repo-relative so it can be passed to repo[None].add(). So
     #    leave it up to the caller to use repo.wjoin() to get an absolute path.
     # 2) Join with '/' because that's what dirstate always uses, even on
     #    Windows. Change existing separator to '/' first in case we are
     #    passed filenames from an external source (like the command line).
     return shortnameslash + util.pconvert(filename)


 def isstandin(filename):
     '''Return true if filename is a big file standin. filename must be
     in Mercurial's internal form (slash-separated).'''
     return filename.startswith(shortnameslash)


 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
     # Change local separator to / first just in case we are passed filenames
     # from an external source (like the command line).
     bits = util.pconvert(filename).split(b'/', 1)
     if len(bits) == 2 and bits[0] == shortname:
         return bits[1]
     else:
         return None


 def updatestandin(repo, lfile, standin):
     """Re-calculate hash value of lfile and write it into standin

     This assumes that "lfutil.standin(lfile) == standin", for efficiency.
     """
     file = repo.wjoin(lfile)
     if repo.wvfs.exists(lfile):
         hash = hashfile(file)
         executable = getexecutable(file)
         writestandin(repo, standin, hash, executable)
     else:
         raise error.Abort(_(b'%s: file not found!') % lfile)


 def readasstandin(fctx):
     '''read hex hash from given filectx of standin file

     This encapsulates how "standin" data is stored into storage layer.'''
     return fctx.data().strip()


 def writestandin(repo, standin, hash, executable):
     '''write hash to <repo.root>/<standin>'''
     repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


 def copyandhash(instream, outfile):
     '''Read bytes from instream (iterable) and write them to outfile,
     computing the SHA-1 hash of the data along the way. Return the hash.'''
     hasher = hashlib.sha1(b'')
     for data in instream:
         hasher.update(data)
         outfile.write(data)
     return hex(hasher.digest())


 def hashfile(file):
     if not os.path.exists(file):
         return b''
     with open(file, b'rb') as fd:
         return hexsha1(fd)


 def getexecutable(filename):
     mode = os.stat(filename).st_mode
     return (
         (mode & stat.S_IXUSR)
         and (mode & stat.S_IXGRP)
         and (mode & stat.S_IXOTH)
     )


 def urljoin(first, second, *arg):
     def join(left, right):
         if not left.endswith(b'/'):
             left += b'/'
         if right.startswith(b'/'):
             right = right[1:]
         return left + right

     url = join(first, second)
     for a in arg:
         url = join(url, a)
     return url


 def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object data"""
     h = hashlib.sha1()
     for chunk in util.filechunkiter(fileobj):
         h.update(chunk)
     return hex(h.digest())


 def httpsendfile(ui, filename):
     return httpconnection.httpsendfile(ui, filename, b'rb')


 def unixpath(path):
     '''Return a version of path normalized for use with the lfdirstate.'''
     return util.pconvert(os.path.normpath(path))


 def islfilesrepo(repo):
     '''Return true if the repo is a largefile repo.'''
     if b'largefiles' in repo.requirements and any(
         shortnameslash in f[0] for f in repo.store.datafiles()
     ):
         return True

     return any(openlfdirstate(repo.ui, repo, False))


 class storeprotonotcapable(Exception):
     def __init__(self, storetypes):
         self.storetypes = storetypes


 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
     wctx = repo[None]
     for standin in repo.dirstate.walk(
         matcher, subrepos=[], unknown=False, ignored=False
     ):
         lfile = splitstandin(standin)
         try:
             hash = readasstandin(wctx[standin])
         except IOError:
             hash = None
         standins.append((lfile, hash))
     return standins


 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
     lfstandin = standin(lfile)
     if lfstandin in repo.dirstate:
         stat = repo.dirstate._map[lfstandin]
         state, mtime = stat[0], stat[3]
     else:
         state, mtime = b'?', -1
     if state == b'n':
         if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
             # state 'n' doesn't ensure 'clean' in this case
             lfdirstate.normallookup(lfile)
         else:
             lfdirstate.normal(lfile)
     elif state == b'm':
         lfdirstate.normallookup(lfile)
     elif state == b'r':
         lfdirstate.remove(lfile)
     elif state == b'a':
         lfdirstate.add(lfile)
     elif state == b'?':
         lfdirstate.drop(lfile)


 def markcommitted(orig, ctx, node):
     repo = ctx.repo()

     orig(node)

     # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
     # because files coming from the 2nd parent are omitted in the latter.
     #
     # The former should be used to get targets of "synclfdirstate",
     # because such files:
     # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
     # - have to be marked as "n" after commit, but
     # - aren't listed in "repo[node].files()"

     lfdirstate = openlfdirstate(repo.ui, repo)
     for f in ctx.files():
         lfile = splitstandin(f)
         if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
     lfdirstate.write()

     # As part of committing, copy all of the largefiles into the cache.
     #
     # Using "node" instead of "ctx" implies an additional "repo[node]"
     # lookup during copyalltostore(), but can omit a redundant check for
     # files coming from the 2nd parent, which should exist in the store
     # at merging.
     copyalltostore(repo, node)


 def getlfilestoupdate(oldstandins, newstandins):
     changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
     filelist = []
     for f in changedstandins:
         if f[0] not in filelist:
             filelist.append(f[0])
     return filelist


 def getlfilestoupload(repo, missing, addfunc):
     makeprogress = repo.ui.makeprogress
     with makeprogress(
         _(b'finding outgoing largefiles'),
         unit=_(b'revisions'),
         total=len(missing),
     ) as progress:
         for i, n in enumerate(missing):
             progress.update(i)
             parents = [p for p in repo[n].parents() if p != node.nullid]

-            oldlfstatus = repo.lfstatus
-            repo.lfstatus = False
-            try:
-                ctx = repo[n]
-            finally:
-                repo.lfstatus = oldlfstatus
+            with lfstatus(repo, value=False):
+                ctx = repo[n]

             files = set(ctx.files())
             if len(parents) == 2:
                 mc = ctx.manifest()
                 mp1 = ctx.p1().manifest()
                 mp2 = ctx.p2().manifest()
                 for f in mp1:
                     if f not in mc:
                         files.add(f)
                 for f in mp2:
                     if f not in mc:
                         files.add(f)
                 for f in mc:
                     if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                         files.add(f)
             for fn in files:
                 if isstandin(fn) and fn in ctx:
                     addfunc(fn, readasstandin(ctx[fn]))


 def updatestandinsbymatch(repo, match):
     '''Update standins in the working directory according to specified match

     This returns (possibly modified) ``match`` object to be used for
     subsequent commit process.
     '''

     ui = repo.ui

     # Case 1: user calls commit with no specific files or
     # include/exclude patterns: refresh and commit all files that
     # are "dirty".
     if match is None or match.always():
         # Spend a bit of time here to get a list of files we know
         # are modified so we can compare only against those.
         # It can cost a lot of time (several seconds)
         # otherwise to update all standins if the largefiles are
         # large.
         lfdirstate = openlfdirstate(ui, repo)
         dirtymatch = matchmod.always()
         unsure, s = lfdirstate.status(
             dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
         )
         modifiedfiles = unsure + s.modified + s.added + s.removed
         lfiles = listlfiles(repo)
         # this only loops through largefiles that exist (not
         # removed/renamed)
         for lfile in lfiles:
             if lfile in modifiedfiles:
                 fstandin = standin(lfile)
                 if repo.wvfs.exists(fstandin):
                     # this handles the case where a rebase is being
                     # performed and the working copy is not updated
                     # yet.
                     if repo.wvfs.exists(lfile):
                         updatestandin(repo, lfile, fstandin)

         return match

     lfiles = listlfiles(repo)
     match._files = repo._subdirlfs(match.files(), lfiles)

     # Case 2: user calls commit with specified patterns: refresh
     # any matching big files.
     smatcher = composestandinmatcher(repo, match)
     standins = repo.dirstate.walk(
         smatcher, subrepos=[], unknown=False, ignored=False
     )

     # No matching big files: get out of the way and pass control to
     # the usual commit() method.
     if not standins:
         return match

     # Refresh all matching big files. It's possible that the
     # commit will end up failing, in which case the big files will
     # stay refreshed. No harm done: the user modified them and
     # asked to commit them, so sooner or later we're going to
     # refresh the standins. Might as well leave them refreshed.
     lfdirstate = openlfdirstate(ui, repo)
     for fstandin in standins:
         lfile = splitstandin(fstandin)
         if lfdirstate[lfile] != b'r':
             updatestandin(repo, lfile, fstandin)

     # Cook up a new matcher that only matches regular files or
     # standins corresponding to the big files requested by the
     # user. Have to modify _files to prevent commit() from
     # complaining "not tracked" for big files.
     match = copy.copy(match)
     origmatchfn = match.matchfn

     # Check both the list of largefiles and the list of
     # standins because if a largefile was removed, it
     # won't be in the list of largefiles at this point
     match._files += sorted(standins)

     actualfiles = []
     for f in match._files:
         fstandin = standin(f)

         # For largefiles, only one of the normal and standin should be
         # committed (except if one of them is a remove). In the case of a
         # standin removal, drop the normal file if it is unknown to dirstate.
         # Thus, skip plain largefile names but keep the standin.
         if f in lfiles or fstandin in standins:
             if repo.dirstate[fstandin] != b'r':
                 if repo.dirstate[f] != b'r':
                     continue
             elif repo.dirstate[f] == b'?':
                 continue

         actualfiles.append(f)
     match._files = actualfiles

     def matchfn(f):
         if origmatchfn(f):
             return f not in lfiles
         else:
             return f in standins

     match.matchfn = matchfn

     return match


 class automatedcommithook(object):
     '''Stateful hook to update standins at the 1st commit of resuming

     For efficiency, updating standins in the working directory should
     be avoided while automated committing (like rebase, transplant and
     so on), because they should be updated before committing.

     But the 1st commit of resuming automated committing (e.g. ``rebase
     --continue``) should update them, because largefiles may be
     modified manually.
     '''

     def __init__(self, resuming):
         self.resuming = resuming

     def __call__(self, repo, match):
         if self.resuming:
             self.resuming = False  # avoids updating at subsequent commits
             return updatestandinsbymatch(repo, match)
         else:
             return match


 def getstatuswriter(ui, repo, forcibly=None):
     '''Return the function to write largefiles specific status out

     If ``forcibly`` is ``None``, this returns the last element of
     ``repo._lfstatuswriters`` as "default" writer function.

     Otherwise, this returns the function to always write out (or
     ignore if ``not forcibly``) status.
     '''
     if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
         return repo._lfstatuswriters[-1]
     else:
         if forcibly:
             return ui.status  # forcibly WRITE OUT
         else:
             return lambda *msg, **opts: None  # forcibly IGNORE