##// END OF EJS Templates
largefiles: move lfstatus context manager to lfutil...
Martin von Zweigbergk -
r43982:73e6d334 default
parent child Browse files
Show More
@@ -1,753 +1,764 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import contextlib
12 import copy
13 import copy
13 import hashlib
14 import hashlib
14 import os
15 import os
15 import stat
16 import stat
16
17
17 from mercurial.i18n import _
18 from mercurial.i18n import _
18 from mercurial.node import hex
19 from mercurial.node import hex
19 from mercurial.pycompat import open
20 from mercurial.pycompat import open
20
21
21 from mercurial import (
22 from mercurial import (
22 dirstate,
23 dirstate,
23 encoding,
24 encoding,
24 error,
25 error,
25 httpconnection,
26 httpconnection,
26 match as matchmod,
27 match as matchmod,
27 node,
28 node,
28 pycompat,
29 pycompat,
29 scmutil,
30 scmutil,
30 sparse,
31 sparse,
31 util,
32 util,
32 vfs as vfsmod,
33 vfs as vfsmod,
33 )
34 )
34
35
35 shortname = b'.hglf'
36 shortname = b'.hglf'
36 shortnameslash = shortname + b'/'
37 shortnameslash = shortname + b'/'
37 longname = b'largefiles'
38 longname = b'largefiles'
38
39
39 # -- Private worker functions ------------------------------------------
40 # -- Private worker functions ------------------------------------------
40
41
41
42
@contextlib.contextmanager
def lfstatus(repo):
    """Temporarily enable largefiles-aware status on *repo*.

    Sets ``repo.lfstatus = True`` for the duration of the ``with`` block
    and restores the previous value (``False`` when the attribute was
    absent) on exit, even if the body raises.
    """
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = True
    try:
        yield
    finally:
        repo.lfstatus = previous
51
52
def getminsize(ui, assumelfiles, opt, default=10):
    """Return the minimum size (in MB) above which files count as largefiles.

    An explicit ``opt`` value wins; otherwise, when ``assumelfiles`` is
    set, the ``largefiles.minsize`` config value (falling back to
    ``default``) is consulted.  Aborts when the value is not numeric or
    when no size could be determined at all.
    """
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    # NOTE: a falsy-but-not-None value (e.g. 0) is returned unchanged.
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize
56
67
57
68
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (cross-device, unsupported FS, ...): fall back
        # to an atomic copy so readers never see a partial file
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        # preserve the source's permission bits on the copy
        os.chmod(dest, os.stat(src).st_mode)
69
80
70
81
def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a
    file with the given hash.

    This cache is used for sharing of largefiles across repositories -
    both to preserve download bandwidth and storage space.
    """
    return os.path.join(_usercachedir(ui), hash)
77
88
78
89
def _usercachedir(ui, name=longname):
    """Return the location of the "global" largefiles cache.

    An explicit ``<name>.usercache`` config path wins; otherwise a
    platform-appropriate per-user cache directory is derived from the
    environment.  Aborts when no location can be determined.
    """
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    if pycompat.iswindows:
        # prefer LOCALAPPDATA, fall back to APPDATA
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)
    elif pycompat.isposix:
        # honor the XDG base-directory spec before falling back to ~/.cache
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )
    raise error.Abort(_(b'unknown %s usercache location') % name)
106
117
107
118
def inusercache(ui, hash):
    """Report whether the largefile with the given hash is present in the
    per-user ("global") cache."""
    return os.path.exists(usercachepath(ui, hash))
111
122
112
123
def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.

    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.
    """
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        # populate the repo store from the user cache on the way out
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
127
138
128
139
class largefilesdirstate(dirstate.dirstate):
    """A dirstate variant used for tracking largefiles.

    Every path handed to the base class is first normalized with
    unixpath(), and ignore handling is disabled (largefiles are never
    ignored).
    """

    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        # largefiles are never subject to ignore rules
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
159
170
160
171
def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if standins:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
193
204
194
205
def lfdirstatestatus(lfdirstate, repo):
    """Resolve 'unsure' entries of the largefiles dirstate against the
    working parent and return the resulting status.

    Files whose content hash differs from the standin recorded in '.'
    are moved to modified; matching ones are marked clean (and the
    dirstate entry refreshed).
    """
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
213
224
214
225
def listlfiles(repo, rev=None, matcher=None):
    """Return a list of largefiles in the working copy or the
    specified changeset."""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]
228
239
229
240
def instore(repo, hash, forcelocal=False):
    """Return true if a largefile with the given hash exists in the store."""
    return os.path.exists(storepath(repo, hash, forcelocal))
233
244
234
245
def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for
    a file with the given hash.

    For shared repositories the store lives in the share source unless
    ``forcelocal`` is set.
    """
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)
241
252
242
253
def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    if repo.shared() and instore(repo, hash, True):
        return (storepath(repo, hash, True), True)

    return (path, False)
259
270
260
271
def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).
    """
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # content does not match the expected hash: discard the copy
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True
284
295
285
296
def copytostore(repo, ctx, file, fstandin):
    """Copy the largefile *file* (whose standin is *fstandin* in *ctx*)
    into the repository store, unless it is already there.

    Warns instead of failing when the working-copy file is missing.
    """
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )
298
309
299
310
def copyalltostore(repo, node):
    """Copy all largefiles in a given revision to the store."""

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in this revision's manifest
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)
308
319
309
320
def copytostoreabsolute(repo, file, hash):
    """Copy the file at the absolute path *file* into the repository
    store under *hash*, then link it into the user cache.

    When the user cache already has the content, a link/copy from there
    is used instead of re-reading the working file.
    """
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            # atomic write so a partially-copied largefile never lands
            # in the store
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)
322
333
323
334
def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)
329
340
330
341
def getstandinmatcher(repo, rmatcher=None):
    """Return a match object that applies rmatcher to the standin directory."""
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match
348
359
349
360
def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher.  Pass the list of files in the matcher
    as the paths specified by the user.
    """
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        # f must both be a standin and have its largefile name accepted
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher
363
374
364
375
def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
376
387
377
388
def isstandin(filename):
    """Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)
382
393
383
394
def splitstandin(filename):
    """Return the largefile name for the given standin path, or None when
    the path is not a standin."""
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
393
404
394
405
def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if not repo.wvfs.exists(lfile):
        raise error.Abort(_(b'%s: file not found!') % lfile)
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
407
418
408
419
def readasstandin(fctx):
    """Read hex hash from given filectx of standin file.

    This encapsulates how "standin" data is stored into storage layer.
    """
    return fctx.data().strip()
414
425
415
426
def writestandin(repo, standin, hash, executable):
    """Write hash to <repo.root>/<standin>, marking it executable when
    requested."""
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
419
430
420
431
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way.  Return the hex
    digest of the hash.
    """
    # sha1() starts from the empty hash state; seeding with b'' was redundant
    hasher = hashlib.sha1()
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())
429
440
430
441
def hashfile(file):
    """Return the hex SHA-1 of the file's contents, or b'' when the file
    does not exist."""
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)
436
447
437
448
def getexecutable(filename):
    """Return a truthy value iff the file has the user, group AND other
    execute bits all set."""
    mode = os.stat(filename).st_mode
    # all three execute bits must be present
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )
445
456
446
457
def urljoin(first, second, *arg):
    """Join two or more URL fragments, ensuring exactly one slash between
    consecutive pieces."""

    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
459
470
460
471
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())
468
479
469
480
def httpsendfile(ui, filename):
    """Return an httpsendfile object opened read-only on *filename*."""
    return httpconnection.httpsendfile(ui, filename, b'rb')
472
483
473
484
def unixpath(path):
    """Return a version of path normalized for use with the lfdirstate."""
    return util.pconvert(os.path.normpath(path))
477
488
478
489
def islfilesrepo(repo):
    """Return true if the repo is a largefile repo."""
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[0] for f in repo.store.datafiles()
    ):
        return True

    # fall back: any entry in the largefiles dirstate counts
    return any(openlfdirstate(repo.ui, repo, False))
487
498
488
499
class storeprotonotcapable(Exception):
    """Raised when no store type supports the requested capabilities.

    ``storetypes`` lists the store types that were considered.
    """

    def __init__(self, storetypes):
        self.storetypes = storetypes
493
504
def getstandinsstate(repo):
    """Return a list of (lfile, hash) pairs for every standin tracked in
    the working directory; hash is None when the standin is unreadable."""
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
508
519
509
520
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    """Propagate the dirstate entry of lfile's standin into lfdirstate.

    When ``normallookup`` is true, an apparently clean ('n') entry is
    still recorded as needing a lookup so it is re-checked later.
    """
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = b'?', -1
    if state == b'n':
        if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == b'm':
        lfdirstate.normallookup(lfile)
    elif state == b'r':
        lfdirstate.remove(lfile)
    elif state == b'a':
        lfdirstate.add(lfile)
    elif state == b'?':
        lfdirstate.drop(lfile)
531
542
532
543
def markcommitted(orig, ctx, node):
    '''Post-commit hook: sync lfdirstate entries for committed standins
    and copy the committed largefiles into the local store cache.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for changed in ctx.files():
        lfile = splitstandin(changed)
        if lfile is None:
            continue
        synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)
561
572
562
573
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (lfile, hash) entry differs
    between ``oldstandins`` and ``newstandins``.

    Each name appears at most once, in the order produced by the
    symmetric difference of the two entry sets.
    '''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    seen = set()  # O(1) dedup instead of O(n) list membership scans
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
570
581
571
582
def getlfilestoupload(repo, missing, addfunc):
    '''Call ``addfunc(standin, hash)`` for every largefile standin
    touched by any of the ``missing`` revisions.

    Shows a progress bar over the revisions.  For merge revisions the
    plain ``ctx.files()`` list is incomplete, so both parent manifests
    are compared explicitly.
    '''
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            # Ignore null parents (root revisions have fewer real parents).
            parents = [p for p in repo[n].parents() if p != node.nullid]

            # Temporarily disable largefiles status so repo[n] yields the
            # raw context (standins visible instead of largefiles).
            oldlfstatus = repo.lfstatus
            repo.lfstatus = False
            try:
                ctx = repo[n]
            finally:
                repo.lfstatus = oldlfstatus

            files = set(ctx.files())
            if len(parents) == 2:
                # Merge revision: add files present in either parent but
                # absent from the merge result, plus files whose entry
                # differs from either parent, since ctx.files() omits
                # files that only came from the second parent.
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                # Only report standins that still exist in this revision.
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))
607
618
608
619
def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.

    Refreshing the standins of matched, modified largefiles is a side
    effect; for pattern-based commits the returned matcher is a copy
    rewritten to match standins instead of plain largefile names.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        # Skip standins whose largefile is marked for removal.
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        # Matched names must be either non-largefiles or standins.
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match
713
724
714
725
class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    While automated committing (rebase, transplant, and so on) is in
    progress, standins are expected to be up to date already, so
    refreshing them at every commit would be wasted work.

    The first commit after resuming (e.g. ``rebase --continue``) must
    refresh them, though, because largefiles may have been modified
    manually in the meantime.
    '''

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if not self.resuming:
            return match
        # Only the very first commit after resuming refreshes standins.
        self.resuming = False
        return updatestandinsbymatch(repo, match)
736
747
737
748
def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns a function that always writes out (when
    ``forcibly`` is true) or always ignores (when false) status.
    '''
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    if forcibly:
        return ui.status  # forcibly WRITE OUT
    return lambda *msg, **opts: None  # forcibly IGNORE
@@ -1,1808 +1,1799 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import contextlib
13 import copy
12 import copy
14 import os
13 import os
15
14
16 from mercurial.i18n import _
15 from mercurial.i18n import _
17
16
18 from mercurial.pycompat import open
17 from mercurial.pycompat import open
19
18
20 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
21
20
22 from mercurial import (
21 from mercurial import (
23 archival,
22 archival,
24 cmdutil,
23 cmdutil,
25 copies as copiesmod,
24 copies as copiesmod,
26 error,
25 error,
27 exchange,
26 exchange,
28 extensions,
27 extensions,
29 exthelper,
28 exthelper,
30 filemerge,
29 filemerge,
31 hg,
30 hg,
32 logcmdutil,
31 logcmdutil,
33 match as matchmod,
32 match as matchmod,
34 merge,
33 merge,
35 pathutil,
34 pathutil,
36 pycompat,
35 pycompat,
37 scmutil,
36 scmutil,
38 smartset,
37 smartset,
39 subrepo,
38 subrepo,
40 upgrade,
39 upgrade,
41 url as urlmod,
40 url as urlmod,
42 util,
41 util,
43 )
42 )
44
43
45 from . import (
44 from . import (
46 lfcommands,
45 lfcommands,
47 lfutil,
46 lfutil,
48 storefactory,
47 storefactory,
49 )
48 )
50
49
51 eh = exthelper.exthelper()
50 eh = exthelper.exthelper()
52
51
52 lfstatus = lfutil.lfstatus
53
53 # -- Utility functions: commonly/repeatedly needed functionality ---------------
54 # -- Utility functions: commonly/repeatedly needed functionality ---------------
54
55
55
56
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    islargefile = lambda f: lfutil.standin(f) in manifest
    m = copy.copy(match)
    m._files = [f for f in m._files if islargefile(f)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: islargefile(f) and origmatchfn(f)
    return m
67
68
68
69
def composenormalfilematcher(match, manifest, exclude=None):
    '''Create a matcher that matches only non-largefiles from the original
    matcher, optionally also excluding the names in ``exclude``.'''
    excluded = set() if exclude is None else set(exclude)

    def isnormalfile(f):
        return not (
            lfutil.isstandin(f)
            or lfutil.standin(f) in manifest
            or f in excluded
        )

    m = copy.copy(match)
    m._files = [f for f in m._files if isnormalfile(f)]
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: isnormalfile(f) and origmatchfn(f)
    return m
84
85
85
86
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    '''Add matched working-directory files as largefiles.

    A file becomes a largefile when ``--large`` was given, when its size
    is at least the configured minimum, or when it matches the
    configured ``largefiles.patterns``.  Returns ``(added, bad)`` lists
    of file names.
    '''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Optional pattern matcher from the largefiles.patterns config.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write an empty-hash standin; the hash is filled in at
                # commit time.
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                if lfdirstate[f] == b'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # Names whose standin could not be added are reported back
            # in their largefile form.
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

        added = [f for f in lfnames if f not in bad]
        return added, bad
159
160
160
161
@contextlib.contextmanager
def lfstatus(repo):
    '''Context manager that enables ``repo.lfstatus`` for its body and
    restores the previous value (default ``False``) on exit.'''
    previous = getattr(repo, 'lfstatus', False)
    repo.lfstatus = True
    try:
        yield
    finally:
        repo.lfstatus = previous
169
170
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    '''Remove matched largefiles from the working directory and repo.

    With ``after`` only deleted files are dropped; otherwise clean files
    are removed too, and modified/added files produce warnings.  Returns
    a nonzero int when any warning was emitted.
    '''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Keep only names whose standin is actually tracked.
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Returns 1 when anything was warned about, 0 otherwise.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # Switch to standin names for the repository-level removal.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(
                repo, lfdirstate, lfutil.splitstandin(f), False
            )

        lfdirstate.write()

    return result
243
234
244
235
245 # For overriding mercurial.hgweb.webcommands so that largefiles will
236 # For overriding mercurial.hgweb.webcommands so that largefiles will
246 # appear at their right place in the manifests.
237 # appear at their right place in the manifests.
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    '''Map a standin path back to its largefile path for hgweb.'''
    lfile = lfutil.splitstandin(path)
    return lfile if lfile else path
250
241
251
242
252 # -- Wrappers: modify existing commands --------------------------------
243 # -- Wrappers: modify existing commands --------------------------------
253
244
254
245
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    '''Wrapper for ``hg add`` rejecting --normal combined with --large.'''
    if opts.get('large') and opts.get('normal'):
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
275
266
276
267
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    '''Wrapper for cmdutil.add: route large files through addlargefiles
    and the remaining normal files through the original implementation.'''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
    bad.extend(lbad)
    return bad
291
282
292
283
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    '''Wrapper for cmdutil.remove: handle normal files via the original
    implementation, then largefiles via removelargefiles.'''
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    lfresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return lfresult or result
315
306
316
307
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    '''Run hgsubrepo.status() with largefiles status enabled.'''
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)
321
312
322
313
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    '''Run ``hg status`` with largefiles status enabled.'''
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)
327
318
328
319
329 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
320 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
330 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
321 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
331 with lfstatus(repo._repo):
322 with lfstatus(repo._repo):
332 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
323 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
333
324
334
325
335 @eh.wrapcommand(b'log')
326 @eh.wrapcommand(b'log')
336 def overridelog(orig, ui, repo, *pats, **opts):
327 def overridelog(orig, ui, repo, *pats, **opts):
337 def overridematchandpats(
328 def overridematchandpats(
338 orig,
329 orig,
339 ctx,
330 ctx,
340 pats=(),
331 pats=(),
341 opts=None,
332 opts=None,
342 globbed=False,
333 globbed=False,
343 default=b'relpath',
334 default=b'relpath',
344 badfn=None,
335 badfn=None,
345 ):
336 ):
346 """Matcher that merges root directory with .hglf, suitable for log.
337 """Matcher that merges root directory with .hglf, suitable for log.
347 It is still possible to match .hglf directly.
338 It is still possible to match .hglf directly.
348 For any listed files run log on the standin too.
339 For any listed files run log on the standin too.
349 matchfn tries both the given filename and with .hglf stripped.
340 matchfn tries both the given filename and with .hglf stripped.
350 """
341 """
351 if opts is None:
342 if opts is None:
352 opts = {}
343 opts = {}
353 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
344 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
354 m, p = copy.copy(matchandpats)
345 m, p = copy.copy(matchandpats)
355
346
356 if m.always():
347 if m.always():
357 # We want to match everything anyway, so there's no benefit trying
348 # We want to match everything anyway, so there's no benefit trying
358 # to add standins.
349 # to add standins.
359 return matchandpats
350 return matchandpats
360
351
361 pats = set(p)
352 pats = set(p)
362
353
363 def fixpats(pat, tostandin=lfutil.standin):
354 def fixpats(pat, tostandin=lfutil.standin):
364 if pat.startswith(b'set:'):
355 if pat.startswith(b'set:'):
365 return pat
356 return pat
366
357
367 kindpat = matchmod._patsplit(pat, None)
358 kindpat = matchmod._patsplit(pat, None)
368
359
369 if kindpat[0] is not None:
360 if kindpat[0] is not None:
370 return kindpat[0] + b':' + tostandin(kindpat[1])
361 return kindpat[0] + b':' + tostandin(kindpat[1])
371 return tostandin(kindpat[1])
362 return tostandin(kindpat[1])
372
363
373 cwd = repo.getcwd()
364 cwd = repo.getcwd()
374 if cwd:
365 if cwd:
375 hglf = lfutil.shortname
366 hglf = lfutil.shortname
376 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
367 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
377
368
378 def tostandin(f):
369 def tostandin(f):
379 # The file may already be a standin, so truncate the back
370 # The file may already be a standin, so truncate the back
380 # prefix and test before mangling it. This avoids turning
371 # prefix and test before mangling it. This avoids turning
381 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
372 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
382 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
373 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
383 return f
374 return f
384
375
385 # An absolute path is from outside the repo, so truncate the
376 # An absolute path is from outside the repo, so truncate the
386 # path to the root before building the standin. Otherwise cwd
377 # path to the root before building the standin. Otherwise cwd
387 # is somewhere in the repo, relative to root, and needs to be
378 # is somewhere in the repo, relative to root, and needs to be
388 # prepended before building the standin.
379 # prepended before building the standin.
389 if os.path.isabs(cwd):
380 if os.path.isabs(cwd):
390 f = f[len(back) :]
381 f = f[len(back) :]
391 else:
382 else:
392 f = cwd + b'/' + f
383 f = cwd + b'/' + f
393 return back + lfutil.standin(f)
384 return back + lfutil.standin(f)
394
385
395 else:
386 else:
396
387
397 def tostandin(f):
388 def tostandin(f):
398 if lfutil.isstandin(f):
389 if lfutil.isstandin(f):
399 return f
390 return f
400 return lfutil.standin(f)
391 return lfutil.standin(f)
401
392
402 pats.update(fixpats(f, tostandin) for f in p)
393 pats.update(fixpats(f, tostandin) for f in p)
403
394
404 for i in range(0, len(m._files)):
395 for i in range(0, len(m._files)):
405 # Don't add '.hglf' to m.files, since that is already covered by '.'
396 # Don't add '.hglf' to m.files, since that is already covered by '.'
406 if m._files[i] == b'.':
397 if m._files[i] == b'.':
407 continue
398 continue
408 standin = lfutil.standin(m._files[i])
399 standin = lfutil.standin(m._files[i])
409 # If the "standin" is a directory, append instead of replace to
400 # If the "standin" is a directory, append instead of replace to
410 # support naming a directory on the command line with only
401 # support naming a directory on the command line with only
411 # largefiles. The original directory is kept to support normal
402 # largefiles. The original directory is kept to support normal
412 # files.
403 # files.
413 if standin in ctx:
404 if standin in ctx:
414 m._files[i] = standin
405 m._files[i] = standin
415 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
406 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
416 m._files.append(standin)
407 m._files.append(standin)
417
408
418 m._fileset = set(m._files)
409 m._fileset = set(m._files)
419 m.always = lambda: False
410 m.always = lambda: False
420 origmatchfn = m.matchfn
411 origmatchfn = m.matchfn
421
412
422 def lfmatchfn(f):
413 def lfmatchfn(f):
423 lf = lfutil.splitstandin(f)
414 lf = lfutil.splitstandin(f)
424 if lf is not None and origmatchfn(lf):
415 if lf is not None and origmatchfn(lf):
425 return True
416 return True
426 r = origmatchfn(f)
417 r = origmatchfn(f)
427 return r
418 return r
428
419
429 m.matchfn = lfmatchfn
420 m.matchfn = lfmatchfn
430
421
431 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
422 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
432 return m, pats
423 return m, pats
433
424
434 # For hg log --patch, the match object is used in two different senses:
425 # For hg log --patch, the match object is used in two different senses:
435 # (1) to determine what revisions should be printed out, and
426 # (1) to determine what revisions should be printed out, and
436 # (2) to determine what files to print out diffs for.
427 # (2) to determine what files to print out diffs for.
437 # The magic matchandpats override should be used for case (1) but not for
428 # The magic matchandpats override should be used for case (1) but not for
438 # case (2).
429 # case (2).
439 oldmatchandpats = scmutil.matchandpats
430 oldmatchandpats = scmutil.matchandpats
440
431
441 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
432 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
442 wctx = repo[None]
433 wctx = repo[None]
443 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
434 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
444 return lambda ctx: match
435 return lambda ctx: match
445
436
446 wrappedmatchandpats = extensions.wrappedfunction(
437 wrappedmatchandpats = extensions.wrappedfunction(
447 scmutil, b'matchandpats', overridematchandpats
438 scmutil, b'matchandpats', overridematchandpats
448 )
439 )
449 wrappedmakefilematcher = extensions.wrappedfunction(
440 wrappedmakefilematcher = extensions.wrappedfunction(
450 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
441 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
451 )
442 )
452 with wrappedmatchandpats, wrappedmakefilematcher:
443 with wrappedmatchandpats, wrappedmakefilematcher:
453 return orig(ui, repo, *pats, **opts)
444 return orig(ui, repo, *pats, **opts)
454
445
455
446
456 @eh.wrapcommand(
447 @eh.wrapcommand(
457 b'verify',
448 b'verify',
458 opts=[
449 opts=[
459 (
450 (
460 b'',
451 b'',
461 b'large',
452 b'large',
462 None,
453 None,
463 _(b'verify that all largefiles in current revision exists'),
454 _(b'verify that all largefiles in current revision exists'),
464 ),
455 ),
465 (
456 (
466 b'',
457 b'',
467 b'lfa',
458 b'lfa',
468 None,
459 None,
469 _(b'verify largefiles in all revisions, not just current'),
460 _(b'verify largefiles in all revisions, not just current'),
470 ),
461 ),
471 (
462 (
472 b'',
463 b'',
473 b'lfc',
464 b'lfc',
474 None,
465 None,
475 _(b'verify local largefile contents, not just existence'),
466 _(b'verify local largefile contents, not just existence'),
476 ),
467 ),
477 ],
468 ],
478 )
469 )
479 def overrideverify(orig, ui, repo, *pats, **opts):
470 def overrideverify(orig, ui, repo, *pats, **opts):
480 large = opts.pop('large', False)
471 large = opts.pop('large', False)
481 all = opts.pop('lfa', False)
472 all = opts.pop('lfa', False)
482 contents = opts.pop('lfc', False)
473 contents = opts.pop('lfc', False)
483
474
484 result = orig(ui, repo, *pats, **opts)
475 result = orig(ui, repo, *pats, **opts)
485 if large or all or contents:
476 if large or all or contents:
486 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
477 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
487 return result
478 return result
488
479
489
480
490 @eh.wrapcommand(
481 @eh.wrapcommand(
491 b'debugstate',
482 b'debugstate',
492 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
483 opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
493 )
484 )
494 def overridedebugstate(orig, ui, repo, *pats, **opts):
485 def overridedebugstate(orig, ui, repo, *pats, **opts):
495 large = opts.pop('large', False)
486 large = opts.pop('large', False)
496 if large:
487 if large:
497
488
498 class fakerepo(object):
489 class fakerepo(object):
499 dirstate = lfutil.openlfdirstate(ui, repo)
490 dirstate = lfutil.openlfdirstate(ui, repo)
500
491
501 orig(ui, fakerepo, *pats, **opts)
492 orig(ui, fakerepo, *pats, **opts)
502 else:
493 else:
503 orig(ui, repo, *pats, **opts)
494 orig(ui, repo, *pats, **opts)
504
495
505
496
506 # Before starting the manifest merge, merge.updates will call
497 # Before starting the manifest merge, merge.updates will call
507 # _checkunknownfile to check if there are any files in the merged-in
498 # _checkunknownfile to check if there are any files in the merged-in
508 # changeset that collide with unknown files in the working copy.
499 # changeset that collide with unknown files in the working copy.
509 #
500 #
510 # The largefiles are seen as unknown, so this prevents us from merging
501 # The largefiles are seen as unknown, so this prevents us from merging
511 # in a file 'foo' if we already have a largefile with the same name.
502 # in a file 'foo' if we already have a largefile with the same name.
512 #
503 #
513 # The overridden function filters the unknown files by removing any
504 # The overridden function filters the unknown files by removing any
514 # largefiles. This makes the merge proceed and we can then handle this
505 # largefiles. This makes the merge proceed and we can then handle this
515 # case further in the overridden calculateupdates function below.
506 # case further in the overridden calculateupdates function below.
516 @eh.wrapfunction(merge, b'_checkunknownfile')
507 @eh.wrapfunction(merge, b'_checkunknownfile')
517 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
508 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
518 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
509 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
519 return False
510 return False
520 return origfn(repo, wctx, mctx, f, f2)
511 return origfn(repo, wctx, mctx, f, f2)
521
512
522
513
523 # The manifest merge handles conflicts on the manifest level. We want
514 # The manifest merge handles conflicts on the manifest level. We want
524 # to handle changes in largefile-ness of files at this level too.
515 # to handle changes in largefile-ness of files at this level too.
525 #
516 #
526 # The strategy is to run the original calculateupdates and then process
517 # The strategy is to run the original calculateupdates and then process
527 # the action list it outputs. There are two cases we need to deal with:
518 # the action list it outputs. There are two cases we need to deal with:
528 #
519 #
529 # 1. Normal file in p1, largefile in p2. Here the largefile is
520 # 1. Normal file in p1, largefile in p2. Here the largefile is
530 # detected via its standin file, which will enter the working copy
521 # detected via its standin file, which will enter the working copy
531 # with a "get" action. It is not "merge" since the standin is all
522 # with a "get" action. It is not "merge" since the standin is all
532 # Mercurial is concerned with at this level -- the link to the
523 # Mercurial is concerned with at this level -- the link to the
533 # existing normal file is not relevant here.
524 # existing normal file is not relevant here.
534 #
525 #
535 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
526 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
536 # since the largefile will be present in the working copy and
527 # since the largefile will be present in the working copy and
537 # different from the normal file in p2. Mercurial therefore
528 # different from the normal file in p2. Mercurial therefore
538 # triggers a merge action.
529 # triggers a merge action.
539 #
530 #
540 # In both cases, we prompt the user and emit new actions to either
531 # In both cases, we prompt the user and emit new actions to either
541 # remove the standin (if the normal file was kept) or to remove the
532 # remove the standin (if the normal file was kept) or to remove the
542 # normal file and get the standin (if the largefile was kept). The
533 # normal file and get the standin (if the largefile was kept). The
543 # default prompt answer is to use the largefile version since it was
534 # default prompt answer is to use the largefile version since it was
544 # presumably changed on purpose.
535 # presumably changed on purpose.
545 #
536 #
546 # Finally, the merge.applyupdates function will then take care of
537 # Finally, the merge.applyupdates function will then take care of
547 # writing the files into the working copy and lfcommands.updatelfiles
538 # writing the files into the working copy and lfcommands.updatelfiles
548 # will update the largefiles.
539 # will update the largefiles.
549 @eh.wrapfunction(merge, b'calculateupdates')
540 @eh.wrapfunction(merge, b'calculateupdates')
550 def overridecalculateupdates(
541 def overridecalculateupdates(
551 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
542 origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
552 ):
543 ):
553 overwrite = force and not branchmerge
544 overwrite = force and not branchmerge
554 actions, diverge, renamedelete = origfn(
545 actions, diverge, renamedelete = origfn(
555 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
546 repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
556 )
547 )
557
548
558 if overwrite:
549 if overwrite:
559 return actions, diverge, renamedelete
550 return actions, diverge, renamedelete
560
551
561 # Convert to dictionary with filename as key and action as value.
552 # Convert to dictionary with filename as key and action as value.
562 lfiles = set()
553 lfiles = set()
563 for f in actions:
554 for f in actions:
564 splitstandin = lfutil.splitstandin(f)
555 splitstandin = lfutil.splitstandin(f)
565 if splitstandin is not None and splitstandin in p1:
556 if splitstandin is not None and splitstandin in p1:
566 lfiles.add(splitstandin)
557 lfiles.add(splitstandin)
567 elif lfutil.standin(f) in p1:
558 elif lfutil.standin(f) in p1:
568 lfiles.add(f)
559 lfiles.add(f)
569
560
570 for lfile in sorted(lfiles):
561 for lfile in sorted(lfiles):
571 standin = lfutil.standin(lfile)
562 standin = lfutil.standin(lfile)
572 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
563 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
573 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
564 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
574 if sm in (b'g', b'dc') and lm != b'r':
565 if sm in (b'g', b'dc') and lm != b'r':
575 if sm == b'dc':
566 if sm == b'dc':
576 f1, f2, fa, move, anc = sargs
567 f1, f2, fa, move, anc = sargs
577 sargs = (p2[f2].flags(), False)
568 sargs = (p2[f2].flags(), False)
578 # Case 1: normal file in the working copy, largefile in
569 # Case 1: normal file in the working copy, largefile in
579 # the second parent
570 # the second parent
580 usermsg = (
571 usermsg = (
581 _(
572 _(
582 b'remote turned local normal file %s into a largefile\n'
573 b'remote turned local normal file %s into a largefile\n'
583 b'use (l)argefile or keep (n)ormal file?'
574 b'use (l)argefile or keep (n)ormal file?'
584 b'$$ &Largefile $$ &Normal file'
575 b'$$ &Largefile $$ &Normal file'
585 )
576 )
586 % lfile
577 % lfile
587 )
578 )
588 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
579 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
589 actions[lfile] = (b'r', None, b'replaced by standin')
580 actions[lfile] = (b'r', None, b'replaced by standin')
590 actions[standin] = (b'g', sargs, b'replaces standin')
581 actions[standin] = (b'g', sargs, b'replaces standin')
591 else: # keep local normal file
582 else: # keep local normal file
592 actions[lfile] = (b'k', None, b'replaces standin')
583 actions[lfile] = (b'k', None, b'replaces standin')
593 if branchmerge:
584 if branchmerge:
594 actions[standin] = (b'k', None, b'replaced by non-standin')
585 actions[standin] = (b'k', None, b'replaced by non-standin')
595 else:
586 else:
596 actions[standin] = (b'r', None, b'replaced by non-standin')
587 actions[standin] = (b'r', None, b'replaced by non-standin')
597 elif lm in (b'g', b'dc') and sm != b'r':
588 elif lm in (b'g', b'dc') and sm != b'r':
598 if lm == b'dc':
589 if lm == b'dc':
599 f1, f2, fa, move, anc = largs
590 f1, f2, fa, move, anc = largs
600 largs = (p2[f2].flags(), False)
591 largs = (p2[f2].flags(), False)
601 # Case 2: largefile in the working copy, normal file in
592 # Case 2: largefile in the working copy, normal file in
602 # the second parent
593 # the second parent
603 usermsg = (
594 usermsg = (
604 _(
595 _(
605 b'remote turned local largefile %s into a normal file\n'
596 b'remote turned local largefile %s into a normal file\n'
606 b'keep (l)argefile or use (n)ormal file?'
597 b'keep (l)argefile or use (n)ormal file?'
607 b'$$ &Largefile $$ &Normal file'
598 b'$$ &Largefile $$ &Normal file'
608 )
599 )
609 % lfile
600 % lfile
610 )
601 )
611 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
602 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
612 if branchmerge:
603 if branchmerge:
613 # largefile can be restored from standin safely
604 # largefile can be restored from standin safely
614 actions[lfile] = (b'k', None, b'replaced by standin')
605 actions[lfile] = (b'k', None, b'replaced by standin')
615 actions[standin] = (b'k', None, b'replaces standin')
606 actions[standin] = (b'k', None, b'replaces standin')
616 else:
607 else:
617 # "lfile" should be marked as "removed" without
608 # "lfile" should be marked as "removed" without
618 # removal of itself
609 # removal of itself
619 actions[lfile] = (
610 actions[lfile] = (
620 b'lfmr',
611 b'lfmr',
621 None,
612 None,
622 b'forget non-standin largefile',
613 b'forget non-standin largefile',
623 )
614 )
624
615
625 # linear-merge should treat this largefile as 're-added'
616 # linear-merge should treat this largefile as 're-added'
626 actions[standin] = (b'a', None, b'keep standin')
617 actions[standin] = (b'a', None, b'keep standin')
627 else: # pick remote normal file
618 else: # pick remote normal file
628 actions[lfile] = (b'g', largs, b'replaces standin')
619 actions[lfile] = (b'g', largs, b'replaces standin')
629 actions[standin] = (b'r', None, b'replaced by non-standin')
620 actions[standin] = (b'r', None, b'replaced by non-standin')
630
621
631 return actions, diverge, renamedelete
622 return actions, diverge, renamedelete
632
623
633
624
634 @eh.wrapfunction(merge, b'recordupdates')
625 @eh.wrapfunction(merge, b'recordupdates')
635 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
626 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
636 if b'lfmr' in actions:
627 if b'lfmr' in actions:
637 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
628 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
638 for lfile, args, msg in actions[b'lfmr']:
629 for lfile, args, msg in actions[b'lfmr']:
639 # this should be executed before 'orig', to execute 'remove'
630 # this should be executed before 'orig', to execute 'remove'
640 # before all other actions
631 # before all other actions
641 repo.dirstate.remove(lfile)
632 repo.dirstate.remove(lfile)
642 # make sure lfile doesn't get synclfdirstate'd as normal
633 # make sure lfile doesn't get synclfdirstate'd as normal
643 lfdirstate.add(lfile)
634 lfdirstate.add(lfile)
644 lfdirstate.write()
635 lfdirstate.write()
645
636
646 return orig(repo, actions, branchmerge, getfiledata)
637 return orig(repo, actions, branchmerge, getfiledata)
647
638
648
639
649 # Override filemerge to prompt the user about how they wish to merge
640 # Override filemerge to prompt the user about how they wish to merge
650 # largefiles. This will handle identical edits without prompting the user.
641 # largefiles. This will handle identical edits without prompting the user.
651 @eh.wrapfunction(filemerge, b'_filemerge')
642 @eh.wrapfunction(filemerge, b'_filemerge')
652 def overridefilemerge(
643 def overridefilemerge(
653 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
644 origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
654 ):
645 ):
655 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
646 if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
656 return origfn(
647 return origfn(
657 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
648 premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
658 )
649 )
659
650
660 ahash = lfutil.readasstandin(fca).lower()
651 ahash = lfutil.readasstandin(fca).lower()
661 dhash = lfutil.readasstandin(fcd).lower()
652 dhash = lfutil.readasstandin(fcd).lower()
662 ohash = lfutil.readasstandin(fco).lower()
653 ohash = lfutil.readasstandin(fco).lower()
663 if (
654 if (
664 ohash != ahash
655 ohash != ahash
665 and ohash != dhash
656 and ohash != dhash
666 and (
657 and (
667 dhash == ahash
658 dhash == ahash
668 or repo.ui.promptchoice(
659 or repo.ui.promptchoice(
669 _(
660 _(
670 b'largefile %s has a merge conflict\nancestor was %s\n'
661 b'largefile %s has a merge conflict\nancestor was %s\n'
671 b'you can keep (l)ocal %s or take (o)ther %s.\n'
662 b'you can keep (l)ocal %s or take (o)ther %s.\n'
672 b'what do you want to do?'
663 b'what do you want to do?'
673 b'$$ &Local $$ &Other'
664 b'$$ &Local $$ &Other'
674 )
665 )
675 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
666 % (lfutil.splitstandin(orig), ahash, dhash, ohash),
676 0,
667 0,
677 )
668 )
678 == 1
669 == 1
679 )
670 )
680 ):
671 ):
681 repo.wwrite(fcd.path(), fco.data(), fco.flags())
672 repo.wwrite(fcd.path(), fco.data(), fco.flags())
682 return True, 0, False
673 return True, 0, False
683
674
684
675
685 @eh.wrapfunction(copiesmod, b'pathcopies')
676 @eh.wrapfunction(copiesmod, b'pathcopies')
686 def copiespathcopies(orig, ctx1, ctx2, match=None):
677 def copiespathcopies(orig, ctx1, ctx2, match=None):
687 copies = orig(ctx1, ctx2, match=match)
678 copies = orig(ctx1, ctx2, match=match)
688 updated = {}
679 updated = {}
689
680
690 for k, v in pycompat.iteritems(copies):
681 for k, v in pycompat.iteritems(copies):
691 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
682 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
692
683
693 return updated
684 return updated
694
685
695
686
696 # Copy first changes the matchers to match standins instead of
687 # Copy first changes the matchers to match standins instead of
697 # largefiles. Then it overrides util.copyfile in that function it
688 # largefiles. Then it overrides util.copyfile in that function it
698 # checks if the destination largefile already exists. It also keeps a
689 # checks if the destination largefile already exists. It also keeps a
699 # list of copied files so that the largefiles can be copied and the
690 # list of copied files so that the largefiles can be copied and the
700 # dirstate updated.
691 # dirstate updated.
701 @eh.wrapfunction(cmdutil, b'copy')
692 @eh.wrapfunction(cmdutil, b'copy')
702 def overridecopy(orig, ui, repo, pats, opts, rename=False):
693 def overridecopy(orig, ui, repo, pats, opts, rename=False):
703 # doesn't remove largefile on rename
694 # doesn't remove largefile on rename
704 if len(pats) < 2:
695 if len(pats) < 2:
705 # this isn't legal, let the original function deal with it
696 # this isn't legal, let the original function deal with it
706 return orig(ui, repo, pats, opts, rename)
697 return orig(ui, repo, pats, opts, rename)
707
698
708 # This could copy both lfiles and normal files in one command,
699 # This could copy both lfiles and normal files in one command,
709 # but we don't want to do that. First replace their matcher to
700 # but we don't want to do that. First replace their matcher to
710 # only match normal files and run it, then replace it to just
701 # only match normal files and run it, then replace it to just
711 # match largefiles and run it again.
702 # match largefiles and run it again.
712 nonormalfiles = False
703 nonormalfiles = False
713 nolfiles = False
704 nolfiles = False
714 manifest = repo[None].manifest()
705 manifest = repo[None].manifest()
715
706
716 def normalfilesmatchfn(
707 def normalfilesmatchfn(
717 orig,
708 orig,
718 ctx,
709 ctx,
719 pats=(),
710 pats=(),
720 opts=None,
711 opts=None,
721 globbed=False,
712 globbed=False,
722 default=b'relpath',
713 default=b'relpath',
723 badfn=None,
714 badfn=None,
724 ):
715 ):
725 if opts is None:
716 if opts is None:
726 opts = {}
717 opts = {}
727 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
718 match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
728 return composenormalfilematcher(match, manifest)
719 return composenormalfilematcher(match, manifest)
729
720
730 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
721 with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
731 try:
722 try:
732 result = orig(ui, repo, pats, opts, rename)
723 result = orig(ui, repo, pats, opts, rename)
733 except error.Abort as e:
724 except error.Abort as e:
734 if pycompat.bytestr(e) != _(b'no files to copy'):
725 if pycompat.bytestr(e) != _(b'no files to copy'):
735 raise e
726 raise e
736 else:
727 else:
737 nonormalfiles = True
728 nonormalfiles = True
738 result = 0
729 result = 0
739
730
740 # The first rename can cause our current working directory to be removed.
731 # The first rename can cause our current working directory to be removed.
741 # In that case there is nothing left to copy/rename so just quit.
732 # In that case there is nothing left to copy/rename so just quit.
742 try:
733 try:
743 repo.getcwd()
734 repo.getcwd()
744 except OSError:
735 except OSError:
745 return result
736 return result
746
737
747 def makestandin(relpath):
738 def makestandin(relpath):
748 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
739 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
749 return repo.wvfs.join(lfutil.standin(path))
740 return repo.wvfs.join(lfutil.standin(path))
750
741
751 fullpats = scmutil.expandpats(pats)
742 fullpats = scmutil.expandpats(pats)
752 dest = fullpats[-1]
743 dest = fullpats[-1]
753
744
754 if os.path.isdir(dest):
745 if os.path.isdir(dest):
755 if not os.path.isdir(makestandin(dest)):
746 if not os.path.isdir(makestandin(dest)):
756 os.makedirs(makestandin(dest))
747 os.makedirs(makestandin(dest))
757
748
758 try:
749 try:
759 # When we call orig below it creates the standins but we don't add
750 # When we call orig below it creates the standins but we don't add
760 # them to the dir state until later so lock during that time.
751 # them to the dir state until later so lock during that time.
761 wlock = repo.wlock()
752 wlock = repo.wlock()
762
753
763 manifest = repo[None].manifest()
754 manifest = repo[None].manifest()
764
755
765 def overridematch(
756 def overridematch(
766 orig,
757 orig,
767 ctx,
758 ctx,
768 pats=(),
759 pats=(),
769 opts=None,
760 opts=None,
770 globbed=False,
761 globbed=False,
771 default=b'relpath',
762 default=b'relpath',
772 badfn=None,
763 badfn=None,
773 ):
764 ):
774 if opts is None:
765 if opts is None:
775 opts = {}
766 opts = {}
776 newpats = []
767 newpats = []
777 # The patterns were previously mangled to add the standin
768 # The patterns were previously mangled to add the standin
778 # directory; we need to remove that now
769 # directory; we need to remove that now
779 for pat in pats:
770 for pat in pats:
780 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
771 if matchmod.patkind(pat) is None and lfutil.shortname in pat:
781 newpats.append(pat.replace(lfutil.shortname, b''))
772 newpats.append(pat.replace(lfutil.shortname, b''))
782 else:
773 else:
783 newpats.append(pat)
774 newpats.append(pat)
784 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
775 match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
785 m = copy.copy(match)
776 m = copy.copy(match)
786 lfile = lambda f: lfutil.standin(f) in manifest
777 lfile = lambda f: lfutil.standin(f) in manifest
787 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
778 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
788 m._fileset = set(m._files)
779 m._fileset = set(m._files)
789 origmatchfn = m.matchfn
780 origmatchfn = m.matchfn
790
781
791 def matchfn(f):
782 def matchfn(f):
792 lfile = lfutil.splitstandin(f)
783 lfile = lfutil.splitstandin(f)
793 return (
784 return (
794 lfile is not None
785 lfile is not None
795 and (f in manifest)
786 and (f in manifest)
796 and origmatchfn(lfile)
787 and origmatchfn(lfile)
797 or None
788 or None
798 )
789 )
799
790
800 m.matchfn = matchfn
791 m.matchfn = matchfn
801 return m
792 return m
802
793
803 listpats = []
794 listpats = []
804 for pat in pats:
795 for pat in pats:
805 if matchmod.patkind(pat) is not None:
796 if matchmod.patkind(pat) is not None:
806 listpats.append(pat)
797 listpats.append(pat)
807 else:
798 else:
808 listpats.append(makestandin(pat))
799 listpats.append(makestandin(pat))
809
800
810 copiedfiles = []
801 copiedfiles = []
811
802
812 def overridecopyfile(orig, src, dest, *args, **kwargs):
803 def overridecopyfile(orig, src, dest, *args, **kwargs):
813 if lfutil.shortname in src and dest.startswith(
804 if lfutil.shortname in src and dest.startswith(
814 repo.wjoin(lfutil.shortname)
805 repo.wjoin(lfutil.shortname)
815 ):
806 ):
816 destlfile = dest.replace(lfutil.shortname, b'')
807 destlfile = dest.replace(lfutil.shortname, b'')
817 if not opts[b'force'] and os.path.exists(destlfile):
808 if not opts[b'force'] and os.path.exists(destlfile):
818 raise IOError(
809 raise IOError(
819 b'', _(b'destination largefile already exists')
810 b'', _(b'destination largefile already exists')
820 )
811 )
821 copiedfiles.append((src, dest))
812 copiedfiles.append((src, dest))
822 orig(src, dest, *args, **kwargs)
813 orig(src, dest, *args, **kwargs)
823
814
824 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
815 with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
825 with extensions.wrappedfunction(scmutil, b'match', overridematch):
816 with extensions.wrappedfunction(scmutil, b'match', overridematch):
826 result += orig(ui, repo, listpats, opts, rename)
817 result += orig(ui, repo, listpats, opts, rename)
827
818
828 lfdirstate = lfutil.openlfdirstate(ui, repo)
819 lfdirstate = lfutil.openlfdirstate(ui, repo)
829 for (src, dest) in copiedfiles:
820 for (src, dest) in copiedfiles:
830 if lfutil.shortname in src and dest.startswith(
821 if lfutil.shortname in src and dest.startswith(
831 repo.wjoin(lfutil.shortname)
822 repo.wjoin(lfutil.shortname)
832 ):
823 ):
833 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
824 srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
834 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
825 destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
835 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
826 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
836 if not os.path.isdir(destlfiledir):
827 if not os.path.isdir(destlfiledir):
837 os.makedirs(destlfiledir)
828 os.makedirs(destlfiledir)
838 if rename:
829 if rename:
839 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
830 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
840
831
841 # The file is gone, but this deletes any empty parent
832 # The file is gone, but this deletes any empty parent
842 # directories as a side-effect.
833 # directories as a side-effect.
843 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
834 repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
844 lfdirstate.remove(srclfile)
835 lfdirstate.remove(srclfile)
845 else:
836 else:
846 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
837 util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
847
838
848 lfdirstate.add(destlfile)
839 lfdirstate.add(destlfile)
849 lfdirstate.write()
840 lfdirstate.write()
850 except error.Abort as e:
841 except error.Abort as e:
851 if pycompat.bytestr(e) != _(b'no files to copy'):
842 if pycompat.bytestr(e) != _(b'no files to copy'):
852 raise e
843 raise e
853 else:
844 else:
854 nolfiles = True
845 nolfiles = True
855 finally:
846 finally:
856 wlock.release()
847 wlock.release()
857
848
858 if nolfiles and nonormalfiles:
849 if nolfiles and nonormalfiles:
859 raise error.Abort(_(b'no files to copy'))
850 raise error.Abort(_(b'no files to copy'))
860
851
861 return result
852 return result
862
853
863
854
864 # When the user calls revert, we have to be careful to not revert any
855 # When the user calls revert, we have to be careful to not revert any
865 # changes to other largefiles accidentally. This means we have to keep
856 # changes to other largefiles accidentally. This means we have to keep
866 # track of the largefiles that are being reverted so we only pull down
857 # track of the largefiles that are being reverted so we only pull down
867 # the necessary largefiles.
858 # the necessary largefiles.
868 #
859 #
869 # Standins are only updated (to match the hash of largefiles) before
860 # Standins are only updated (to match the hash of largefiles) before
870 # commits. Update the standins then run the original revert, changing
861 # commits. Update the standins then run the original revert, changing
871 # the matcher to hit standins instead of largefiles. Based on the
862 # the matcher to hit standins instead of largefiles. Based on the
872 # resulting standins update the largefiles.
863 # resulting standins update the largefiles.
873 @eh.wrapfunction(cmdutil, b'revert')
864 @eh.wrapfunction(cmdutil, b'revert')
874 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
865 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
875 # Because we put the standins in a bad state (by updating them)
866 # Because we put the standins in a bad state (by updating them)
876 # and then return them to a correct state we need to lock to
867 # and then return them to a correct state we need to lock to
877 # prevent others from changing them in their incorrect state.
868 # prevent others from changing them in their incorrect state.
878 with repo.wlock():
869 with repo.wlock():
879 lfdirstate = lfutil.openlfdirstate(ui, repo)
870 lfdirstate = lfutil.openlfdirstate(ui, repo)
880 s = lfutil.lfdirstatestatus(lfdirstate, repo)
871 s = lfutil.lfdirstatestatus(lfdirstate, repo)
881 lfdirstate.write()
872 lfdirstate.write()
882 for lfile in s.modified:
873 for lfile in s.modified:
883 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
874 lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
884 for lfile in s.deleted:
875 for lfile in s.deleted:
885 fstandin = lfutil.standin(lfile)
876 fstandin = lfutil.standin(lfile)
886 if repo.wvfs.exists(fstandin):
877 if repo.wvfs.exists(fstandin):
887 repo.wvfs.unlink(fstandin)
878 repo.wvfs.unlink(fstandin)
888
879
889 oldstandins = lfutil.getstandinsstate(repo)
880 oldstandins = lfutil.getstandinsstate(repo)
890
881
891 def overridematch(
882 def overridematch(
892 orig,
883 orig,
893 mctx,
884 mctx,
894 pats=(),
885 pats=(),
895 opts=None,
886 opts=None,
896 globbed=False,
887 globbed=False,
897 default=b'relpath',
888 default=b'relpath',
898 badfn=None,
889 badfn=None,
899 ):
890 ):
900 if opts is None:
891 if opts is None:
901 opts = {}
892 opts = {}
902 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
893 match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
903 m = copy.copy(match)
894 m = copy.copy(match)
904
895
905 # revert supports recursing into subrepos, and though largefiles
896 # revert supports recursing into subrepos, and though largefiles
906 # currently doesn't work correctly in that case, this match is
897 # currently doesn't work correctly in that case, this match is
907 # called, so the lfdirstate above may not be the correct one for
898 # called, so the lfdirstate above may not be the correct one for
908 # this invocation of match.
899 # this invocation of match.
909 lfdirstate = lfutil.openlfdirstate(
900 lfdirstate = lfutil.openlfdirstate(
910 mctx.repo().ui, mctx.repo(), False
901 mctx.repo().ui, mctx.repo(), False
911 )
902 )
912
903
913 wctx = repo[None]
904 wctx = repo[None]
914 matchfiles = []
905 matchfiles = []
915 for f in m._files:
906 for f in m._files:
916 standin = lfutil.standin(f)
907 standin = lfutil.standin(f)
917 if standin in ctx or standin in mctx:
908 if standin in ctx or standin in mctx:
918 matchfiles.append(standin)
909 matchfiles.append(standin)
919 elif standin in wctx or lfdirstate[f] == b'r':
910 elif standin in wctx or lfdirstate[f] == b'r':
920 continue
911 continue
921 else:
912 else:
922 matchfiles.append(f)
913 matchfiles.append(f)
923 m._files = matchfiles
914 m._files = matchfiles
924 m._fileset = set(m._files)
915 m._fileset = set(m._files)
925 origmatchfn = m.matchfn
916 origmatchfn = m.matchfn
926
917
927 def matchfn(f):
918 def matchfn(f):
928 lfile = lfutil.splitstandin(f)
919 lfile = lfutil.splitstandin(f)
929 if lfile is not None:
920 if lfile is not None:
930 return origmatchfn(lfile) and (f in ctx or f in mctx)
921 return origmatchfn(lfile) and (f in ctx or f in mctx)
931 return origmatchfn(f)
922 return origmatchfn(f)
932
923
933 m.matchfn = matchfn
924 m.matchfn = matchfn
934 return m
925 return m
935
926
936 with extensions.wrappedfunction(scmutil, b'match', overridematch):
927 with extensions.wrappedfunction(scmutil, b'match', overridematch):
937 orig(ui, repo, ctx, parents, *pats, **opts)
928 orig(ui, repo, ctx, parents, *pats, **opts)
938
929
939 newstandins = lfutil.getstandinsstate(repo)
930 newstandins = lfutil.getstandinsstate(repo)
940 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
931 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
941 # lfdirstate should be 'normallookup'-ed for updated files,
932 # lfdirstate should be 'normallookup'-ed for updated files,
942 # because reverting doesn't touch dirstate for 'normal' files
933 # because reverting doesn't touch dirstate for 'normal' files
943 # when target revision is explicitly specified: in such case,
934 # when target revision is explicitly specified: in such case,
944 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
935 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
945 # of target (standin) file.
936 # of target (standin) file.
946 lfcommands.updatelfiles(
937 lfcommands.updatelfiles(
947 ui, repo, filelist, printmessage=False, normallookup=True
938 ui, repo, filelist, printmessage=False, normallookup=True
948 )
939 )
949
940
950
941
951 # after pulling changesets, we need to take some extra care to get
942 # after pulling changesets, we need to take some extra care to get
952 # largefiles updated remotely
943 # largefiles updated remotely
953 @eh.wrapcommand(
944 @eh.wrapcommand(
954 b'pull',
945 b'pull',
955 opts=[
946 opts=[
956 (
947 (
957 b'',
948 b'',
958 b'all-largefiles',
949 b'all-largefiles',
959 None,
950 None,
960 _(b'download all pulled versions of largefiles (DEPRECATED)'),
951 _(b'download all pulled versions of largefiles (DEPRECATED)'),
961 ),
952 ),
962 (
953 (
963 b'',
954 b'',
964 b'lfrev',
955 b'lfrev',
965 [],
956 [],
966 _(b'download largefiles for these revisions'),
957 _(b'download largefiles for these revisions'),
967 _(b'REV'),
958 _(b'REV'),
968 ),
959 ),
969 ],
960 ],
970 )
961 )
971 def overridepull(orig, ui, repo, source=None, **opts):
962 def overridepull(orig, ui, repo, source=None, **opts):
972 revsprepull = len(repo)
963 revsprepull = len(repo)
973 if not source:
964 if not source:
974 source = b'default'
965 source = b'default'
975 repo.lfpullsource = source
966 repo.lfpullsource = source
976 result = orig(ui, repo, source, **opts)
967 result = orig(ui, repo, source, **opts)
977 revspostpull = len(repo)
968 revspostpull = len(repo)
978 lfrevs = opts.get('lfrev', [])
969 lfrevs = opts.get('lfrev', [])
979 if opts.get('all_largefiles'):
970 if opts.get('all_largefiles'):
980 lfrevs.append(b'pulled()')
971 lfrevs.append(b'pulled()')
981 if lfrevs and revspostpull > revsprepull:
972 if lfrevs and revspostpull > revsprepull:
982 numcached = 0
973 numcached = 0
983 repo.firstpulled = revsprepull # for pulled() revset expression
974 repo.firstpulled = revsprepull # for pulled() revset expression
984 try:
975 try:
985 for rev in scmutil.revrange(repo, lfrevs):
976 for rev in scmutil.revrange(repo, lfrevs):
986 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
977 ui.note(_(b'pulling largefiles for revision %d\n') % rev)
987 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
978 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
988 numcached += len(cached)
979 numcached += len(cached)
989 finally:
980 finally:
990 del repo.firstpulled
981 del repo.firstpulled
991 ui.status(_(b"%d largefiles cached\n") % numcached)
982 ui.status(_(b"%d largefiles cached\n") % numcached)
992 return result
983 return result
993
984
994
985
995 @eh.wrapcommand(
986 @eh.wrapcommand(
996 b'push',
987 b'push',
997 opts=[
988 opts=[
998 (
989 (
999 b'',
990 b'',
1000 b'lfrev',
991 b'lfrev',
1001 [],
992 [],
1002 _(b'upload largefiles for these revisions'),
993 _(b'upload largefiles for these revisions'),
1003 _(b'REV'),
994 _(b'REV'),
1004 )
995 )
1005 ],
996 ],
1006 )
997 )
1007 def overridepush(orig, ui, repo, *args, **kwargs):
998 def overridepush(orig, ui, repo, *args, **kwargs):
1008 """Override push command and store --lfrev parameters in opargs"""
999 """Override push command and store --lfrev parameters in opargs"""
1009 lfrevs = kwargs.pop('lfrev', None)
1000 lfrevs = kwargs.pop('lfrev', None)
1010 if lfrevs:
1001 if lfrevs:
1011 opargs = kwargs.setdefault('opargs', {})
1002 opargs = kwargs.setdefault('opargs', {})
1012 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1003 opargs[b'lfrevs'] = scmutil.revrange(repo, lfrevs)
1013 return orig(ui, repo, *args, **kwargs)
1004 return orig(ui, repo, *args, **kwargs)
1014
1005
1015
1006
1016 @eh.wrapfunction(exchange, b'pushoperation')
1007 @eh.wrapfunction(exchange, b'pushoperation')
1017 def exchangepushoperation(orig, *args, **kwargs):
1008 def exchangepushoperation(orig, *args, **kwargs):
1018 """Override pushoperation constructor and store lfrevs parameter"""
1009 """Override pushoperation constructor and store lfrevs parameter"""
1019 lfrevs = kwargs.pop('lfrevs', None)
1010 lfrevs = kwargs.pop('lfrevs', None)
1020 pushop = orig(*args, **kwargs)
1011 pushop = orig(*args, **kwargs)
1021 pushop.lfrevs = lfrevs
1012 pushop.lfrevs = lfrevs
1022 return pushop
1013 return pushop
1023
1014
1024
1015
1025 @eh.revsetpredicate(b'pulled()')
1016 @eh.revsetpredicate(b'pulled()')
1026 def pulledrevsetsymbol(repo, subset, x):
1017 def pulledrevsetsymbol(repo, subset, x):
1027 """Changesets that just has been pulled.
1018 """Changesets that just has been pulled.
1028
1019
1029 Only available with largefiles from pull --lfrev expressions.
1020 Only available with largefiles from pull --lfrev expressions.
1030
1021
1031 .. container:: verbose
1022 .. container:: verbose
1032
1023
1033 Some examples:
1024 Some examples:
1034
1025
1035 - pull largefiles for all new changesets::
1026 - pull largefiles for all new changesets::
1036
1027
1037 hg pull -lfrev "pulled()"
1028 hg pull -lfrev "pulled()"
1038
1029
1039 - pull largefiles for all new branch heads::
1030 - pull largefiles for all new branch heads::
1040
1031
1041 hg pull -lfrev "head(pulled()) and not closed()"
1032 hg pull -lfrev "head(pulled()) and not closed()"
1042
1033
1043 """
1034 """
1044
1035
1045 try:
1036 try:
1046 firstpulled = repo.firstpulled
1037 firstpulled = repo.firstpulled
1047 except AttributeError:
1038 except AttributeError:
1048 raise error.Abort(_(b"pulled() only available in --lfrev"))
1039 raise error.Abort(_(b"pulled() only available in --lfrev"))
1049 return smartset.baseset([r for r in subset if r >= firstpulled])
1040 return smartset.baseset([r for r in subset if r >= firstpulled])
1050
1041
1051
1042
1052 @eh.wrapcommand(
1043 @eh.wrapcommand(
1053 b'clone',
1044 b'clone',
1054 opts=[
1045 opts=[
1055 (
1046 (
1056 b'',
1047 b'',
1057 b'all-largefiles',
1048 b'all-largefiles',
1058 None,
1049 None,
1059 _(b'download all versions of all largefiles'),
1050 _(b'download all versions of all largefiles'),
1060 )
1051 )
1061 ],
1052 ],
1062 )
1053 )
1063 def overrideclone(orig, ui, source, dest=None, **opts):
1054 def overrideclone(orig, ui, source, dest=None, **opts):
1064 d = dest
1055 d = dest
1065 if d is None:
1056 if d is None:
1066 d = hg.defaultdest(source)
1057 d = hg.defaultdest(source)
1067 if opts.get('all_largefiles') and not hg.islocal(d):
1058 if opts.get('all_largefiles') and not hg.islocal(d):
1068 raise error.Abort(
1059 raise error.Abort(
1069 _(b'--all-largefiles is incompatible with non-local destination %s')
1060 _(b'--all-largefiles is incompatible with non-local destination %s')
1070 % d
1061 % d
1071 )
1062 )
1072
1063
1073 return orig(ui, source, dest, **opts)
1064 return orig(ui, source, dest, **opts)
1074
1065
1075
1066
1076 @eh.wrapfunction(hg, b'clone')
1067 @eh.wrapfunction(hg, b'clone')
1077 def hgclone(orig, ui, opts, *args, **kwargs):
1068 def hgclone(orig, ui, opts, *args, **kwargs):
1078 result = orig(ui, opts, *args, **kwargs)
1069 result = orig(ui, opts, *args, **kwargs)
1079
1070
1080 if result is not None:
1071 if result is not None:
1081 sourcerepo, destrepo = result
1072 sourcerepo, destrepo = result
1082 repo = destrepo.local()
1073 repo = destrepo.local()
1083
1074
1084 # When cloning to a remote repo (like through SSH), no repo is available
1075 # When cloning to a remote repo (like through SSH), no repo is available
1085 # from the peer. Therefore the largefiles can't be downloaded and the
1076 # from the peer. Therefore the largefiles can't be downloaded and the
1086 # hgrc can't be updated.
1077 # hgrc can't be updated.
1087 if not repo:
1078 if not repo:
1088 return result
1079 return result
1089
1080
1090 # Caching is implicitly limited to 'rev' option, since the dest repo was
1081 # Caching is implicitly limited to 'rev' option, since the dest repo was
1091 # truncated at that point. The user may expect a download count with
1082 # truncated at that point. The user may expect a download count with
1092 # this option, so attempt whether or not this is a largefile repo.
1083 # this option, so attempt whether or not this is a largefile repo.
1093 if opts.get(b'all_largefiles'):
1084 if opts.get(b'all_largefiles'):
1094 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1085 success, missing = lfcommands.downloadlfiles(ui, repo, None)
1095
1086
1096 if missing != 0:
1087 if missing != 0:
1097 return None
1088 return None
1098
1089
1099 return result
1090 return result
1100
1091
1101
1092
1102 @eh.wrapcommand(b'rebase', extension=b'rebase')
1093 @eh.wrapcommand(b'rebase', extension=b'rebase')
1103 def overriderebase(orig, ui, repo, **opts):
1094 def overriderebase(orig, ui, repo, **opts):
1104 if not util.safehasattr(repo, b'_largefilesenabled'):
1095 if not util.safehasattr(repo, b'_largefilesenabled'):
1105 return orig(ui, repo, **opts)
1096 return orig(ui, repo, **opts)
1106
1097
1107 resuming = opts.get('continue')
1098 resuming = opts.get('continue')
1108 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1099 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1109 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1100 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1110 try:
1101 try:
1111 return orig(ui, repo, **opts)
1102 return orig(ui, repo, **opts)
1112 finally:
1103 finally:
1113 repo._lfstatuswriters.pop()
1104 repo._lfstatuswriters.pop()
1114 repo._lfcommithooks.pop()
1105 repo._lfcommithooks.pop()
1115
1106
1116
1107
1117 @eh.wrapcommand(b'archive')
1108 @eh.wrapcommand(b'archive')
1118 def overridearchivecmd(orig, ui, repo, dest, **opts):
1109 def overridearchivecmd(orig, ui, repo, dest, **opts):
1119 with lfstatus(repo.unfiltered()):
1110 with lfstatus(repo.unfiltered()):
1120 return orig(ui, repo.unfiltered(), dest, **opts)
1111 return orig(ui, repo.unfiltered(), dest, **opts)
1121
1112
1122
1113
1123 @eh.wrapfunction(webcommands, b'archive')
1114 @eh.wrapfunction(webcommands, b'archive')
1124 def hgwebarchive(orig, web):
1115 def hgwebarchive(orig, web):
1125 with lfstatus(web.repo):
1116 with lfstatus(web.repo):
1126 return orig(web)
1117 return orig(web)
1127
1118
1128
1119
1129 @eh.wrapfunction(archival, b'archive')
1120 @eh.wrapfunction(archival, b'archive')
1130 def overridearchive(
1121 def overridearchive(
1131 orig,
1122 orig,
1132 repo,
1123 repo,
1133 dest,
1124 dest,
1134 node,
1125 node,
1135 kind,
1126 kind,
1136 decode=True,
1127 decode=True,
1137 match=None,
1128 match=None,
1138 prefix=b'',
1129 prefix=b'',
1139 mtime=None,
1130 mtime=None,
1140 subrepos=None,
1131 subrepos=None,
1141 ):
1132 ):
1142 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1133 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1143 # unfiltered repo's attr, so check that as well.
1134 # unfiltered repo's attr, so check that as well.
1144 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1135 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1145 return orig(
1136 return orig(
1146 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1137 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1147 )
1138 )
1148
1139
1149 # No need to lock because we are only reading history and
1140 # No need to lock because we are only reading history and
1150 # largefile caches, neither of which are modified.
1141 # largefile caches, neither of which are modified.
1151 if node is not None:
1142 if node is not None:
1152 lfcommands.cachelfiles(repo.ui, repo, node)
1143 lfcommands.cachelfiles(repo.ui, repo, node)
1153
1144
1154 if kind not in archival.archivers:
1145 if kind not in archival.archivers:
1155 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1146 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1156
1147
1157 ctx = repo[node]
1148 ctx = repo[node]
1158
1149
1159 if kind == b'files':
1150 if kind == b'files':
1160 if prefix:
1151 if prefix:
1161 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1152 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1162 else:
1153 else:
1163 prefix = archival.tidyprefix(dest, kind, prefix)
1154 prefix = archival.tidyprefix(dest, kind, prefix)
1164
1155
1165 def write(name, mode, islink, getdata):
1156 def write(name, mode, islink, getdata):
1166 if match and not match(name):
1157 if match and not match(name):
1167 return
1158 return
1168 data = getdata()
1159 data = getdata()
1169 if decode:
1160 if decode:
1170 data = repo.wwritedata(name, data)
1161 data = repo.wwritedata(name, data)
1171 archiver.addfile(prefix + name, mode, islink, data)
1162 archiver.addfile(prefix + name, mode, islink, data)
1172
1163
1173 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1164 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1174
1165
1175 if repo.ui.configbool(b"ui", b"archivemeta"):
1166 if repo.ui.configbool(b"ui", b"archivemeta"):
1176 write(
1167 write(
1177 b'.hg_archival.txt',
1168 b'.hg_archival.txt',
1178 0o644,
1169 0o644,
1179 False,
1170 False,
1180 lambda: archival.buildmetadata(ctx),
1171 lambda: archival.buildmetadata(ctx),
1181 )
1172 )
1182
1173
1183 for f in ctx:
1174 for f in ctx:
1184 ff = ctx.flags(f)
1175 ff = ctx.flags(f)
1185 getdata = ctx[f].data
1176 getdata = ctx[f].data
1186 lfile = lfutil.splitstandin(f)
1177 lfile = lfutil.splitstandin(f)
1187 if lfile is not None:
1178 if lfile is not None:
1188 if node is not None:
1179 if node is not None:
1189 path = lfutil.findfile(repo, getdata().strip())
1180 path = lfutil.findfile(repo, getdata().strip())
1190
1181
1191 if path is None:
1182 if path is None:
1192 raise error.Abort(
1183 raise error.Abort(
1193 _(
1184 _(
1194 b'largefile %s not found in repo store or system cache'
1185 b'largefile %s not found in repo store or system cache'
1195 )
1186 )
1196 % lfile
1187 % lfile
1197 )
1188 )
1198 else:
1189 else:
1199 path = lfile
1190 path = lfile
1200
1191
1201 f = lfile
1192 f = lfile
1202
1193
1203 getdata = lambda: util.readfile(path)
1194 getdata = lambda: util.readfile(path)
1204 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1195 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1205
1196
1206 if subrepos:
1197 if subrepos:
1207 for subpath in sorted(ctx.substate):
1198 for subpath in sorted(ctx.substate):
1208 sub = ctx.workingsub(subpath)
1199 sub = ctx.workingsub(subpath)
1209 submatch = matchmod.subdirmatcher(subpath, match)
1200 submatch = matchmod.subdirmatcher(subpath, match)
1210 subprefix = prefix + subpath + b'/'
1201 subprefix = prefix + subpath + b'/'
1211 with lfstatus(sub._repo):
1202 with lfstatus(sub._repo):
1212 sub.archive(archiver, subprefix, submatch)
1203 sub.archive(archiver, subprefix, submatch)
1213
1204
1214 archiver.done()
1205 archiver.done()
1215
1206
1216
1207
1217 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1208 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1218 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1209 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1219 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1210 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1220 if not lfenabled or not repo._repo.lfstatus:
1211 if not lfenabled or not repo._repo.lfstatus:
1221 return orig(repo, archiver, prefix, match, decode)
1212 return orig(repo, archiver, prefix, match, decode)
1222
1213
1223 repo._get(repo._state + (b'hg',))
1214 repo._get(repo._state + (b'hg',))
1224 rev = repo._state[1]
1215 rev = repo._state[1]
1225 ctx = repo._repo[rev]
1216 ctx = repo._repo[rev]
1226
1217
1227 if ctx.node() is not None:
1218 if ctx.node() is not None:
1228 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1219 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1229
1220
1230 def write(name, mode, islink, getdata):
1221 def write(name, mode, islink, getdata):
1231 # At this point, the standin has been replaced with the largefile name,
1222 # At this point, the standin has been replaced with the largefile name,
1232 # so the normal matcher works here without the lfutil variants.
1223 # so the normal matcher works here without the lfutil variants.
1233 if match and not match(f):
1224 if match and not match(f):
1234 return
1225 return
1235 data = getdata()
1226 data = getdata()
1236 if decode:
1227 if decode:
1237 data = repo._repo.wwritedata(name, data)
1228 data = repo._repo.wwritedata(name, data)
1238
1229
1239 archiver.addfile(prefix + name, mode, islink, data)
1230 archiver.addfile(prefix + name, mode, islink, data)
1240
1231
1241 for f in ctx:
1232 for f in ctx:
1242 ff = ctx.flags(f)
1233 ff = ctx.flags(f)
1243 getdata = ctx[f].data
1234 getdata = ctx[f].data
1244 lfile = lfutil.splitstandin(f)
1235 lfile = lfutil.splitstandin(f)
1245 if lfile is not None:
1236 if lfile is not None:
1246 if ctx.node() is not None:
1237 if ctx.node() is not None:
1247 path = lfutil.findfile(repo._repo, getdata().strip())
1238 path = lfutil.findfile(repo._repo, getdata().strip())
1248
1239
1249 if path is None:
1240 if path is None:
1250 raise error.Abort(
1241 raise error.Abort(
1251 _(
1242 _(
1252 b'largefile %s not found in repo store or system cache'
1243 b'largefile %s not found in repo store or system cache'
1253 )
1244 )
1254 % lfile
1245 % lfile
1255 )
1246 )
1256 else:
1247 else:
1257 path = lfile
1248 path = lfile
1258
1249
1259 f = lfile
1250 f = lfile
1260
1251
1261 getdata = lambda: util.readfile(os.path.join(prefix, path))
1252 getdata = lambda: util.readfile(os.path.join(prefix, path))
1262
1253
1263 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1254 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1264
1255
1265 for subpath in sorted(ctx.substate):
1256 for subpath in sorted(ctx.substate):
1266 sub = ctx.workingsub(subpath)
1257 sub = ctx.workingsub(subpath)
1267 submatch = matchmod.subdirmatcher(subpath, match)
1258 submatch = matchmod.subdirmatcher(subpath, match)
1268 subprefix = prefix + subpath + b'/'
1259 subprefix = prefix + subpath + b'/'
1269 with lfstatus(sub._repo):
1260 with lfstatus(sub._repo):
1270 sub.archive(archiver, subprefix, submatch, decode)
1261 sub.archive(archiver, subprefix, submatch, decode)
1271
1262
1272
1263
1273 # If a largefile is modified, the change is not reflected in its
1264 # If a largefile is modified, the change is not reflected in its
1274 # standin until a commit. cmdutil.bailifchanged() raises an exception
1265 # standin until a commit. cmdutil.bailifchanged() raises an exception
1275 # if the repo has uncommitted changes. Wrap it to also check if
1266 # if the repo has uncommitted changes. Wrap it to also check if
1276 # largefiles were changed. This is used by bisect, backout and fetch.
1267 # largefiles were changed. This is used by bisect, backout and fetch.
1277 @eh.wrapfunction(cmdutil, b'bailifchanged')
1268 @eh.wrapfunction(cmdutil, b'bailifchanged')
1278 def overridebailifchanged(orig, repo, *args, **kwargs):
1269 def overridebailifchanged(orig, repo, *args, **kwargs):
1279 orig(repo, *args, **kwargs)
1270 orig(repo, *args, **kwargs)
1280 with lfstatus(repo):
1271 with lfstatus(repo):
1281 s = repo.status()
1272 s = repo.status()
1282 if s.modified or s.added or s.removed or s.deleted:
1273 if s.modified or s.added or s.removed or s.deleted:
1283 raise error.Abort(_(b'uncommitted changes'))
1274 raise error.Abort(_(b'uncommitted changes'))
1284
1275
1285
1276
1286 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1277 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1287 def postcommitstatus(orig, repo, *args, **kwargs):
1278 def postcommitstatus(orig, repo, *args, **kwargs):
1288 with lfstatus(repo):
1279 with lfstatus(repo):
1289 return orig(repo, *args, **kwargs)
1280 return orig(repo, *args, **kwargs)
1290
1281
1291
1282
1292 @eh.wrapfunction(cmdutil, b'forget')
1283 @eh.wrapfunction(cmdutil, b'forget')
1293 def cmdutilforget(
1284 def cmdutilforget(
1294 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1285 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1295 ):
1286 ):
1296 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1287 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1297 bad, forgot = orig(
1288 bad, forgot = orig(
1298 ui,
1289 ui,
1299 repo,
1290 repo,
1300 normalmatcher,
1291 normalmatcher,
1301 prefix,
1292 prefix,
1302 uipathfn,
1293 uipathfn,
1303 explicitonly,
1294 explicitonly,
1304 dryrun,
1295 dryrun,
1305 interactive,
1296 interactive,
1306 )
1297 )
1307 m = composelargefilematcher(match, repo[None].manifest())
1298 m = composelargefilematcher(match, repo[None].manifest())
1308
1299
1309 with lfstatus(repo):
1300 with lfstatus(repo):
1310 s = repo.status(match=m, clean=True)
1301 s = repo.status(match=m, clean=True)
1311 manifest = repo[None].manifest()
1302 manifest = repo[None].manifest()
1312 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1303 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1313 forget = [f for f in forget if lfutil.standin(f) in manifest]
1304 forget = [f for f in forget if lfutil.standin(f) in manifest]
1314
1305
1315 for f in forget:
1306 for f in forget:
1316 fstandin = lfutil.standin(f)
1307 fstandin = lfutil.standin(f)
1317 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1308 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1318 ui.warn(
1309 ui.warn(
1319 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1310 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1320 )
1311 )
1321 bad.append(f)
1312 bad.append(f)
1322
1313
1323 for f in forget:
1314 for f in forget:
1324 if ui.verbose or not m.exact(f):
1315 if ui.verbose or not m.exact(f):
1325 ui.status(_(b'removing %s\n') % uipathfn(f))
1316 ui.status(_(b'removing %s\n') % uipathfn(f))
1326
1317
1327 # Need to lock because standin files are deleted then removed from the
1318 # Need to lock because standin files are deleted then removed from the
1328 # repository and we could race in-between.
1319 # repository and we could race in-between.
1329 with repo.wlock():
1320 with repo.wlock():
1330 lfdirstate = lfutil.openlfdirstate(ui, repo)
1321 lfdirstate = lfutil.openlfdirstate(ui, repo)
1331 for f in forget:
1322 for f in forget:
1332 if lfdirstate[f] == b'a':
1323 if lfdirstate[f] == b'a':
1333 lfdirstate.drop(f)
1324 lfdirstate.drop(f)
1334 else:
1325 else:
1335 lfdirstate.remove(f)
1326 lfdirstate.remove(f)
1336 lfdirstate.write()
1327 lfdirstate.write()
1337 standins = [lfutil.standin(f) for f in forget]
1328 standins = [lfutil.standin(f) for f in forget]
1338 for f in standins:
1329 for f in standins:
1339 repo.wvfs.unlinkpath(f, ignoremissing=True)
1330 repo.wvfs.unlinkpath(f, ignoremissing=True)
1340 rejected = repo[None].forget(standins)
1331 rejected = repo[None].forget(standins)
1341
1332
1342 bad.extend(f for f in rejected if f in m.files())
1333 bad.extend(f for f in rejected if f in m.files())
1343 forgot.extend(f for f in forget if f not in rejected)
1334 forgot.extend(f for f in forget if f not in rejected)
1344 return bad, forgot
1335 return bad, forgot
1345
1336
1346
1337
1347 def _getoutgoings(repo, other, missing, addfunc):
1338 def _getoutgoings(repo, other, missing, addfunc):
1348 """get pairs of filename and largefile hash in outgoing revisions
1339 """get pairs of filename and largefile hash in outgoing revisions
1349 in 'missing'.
1340 in 'missing'.
1350
1341
1351 largefiles already existing on 'other' repository are ignored.
1342 largefiles already existing on 'other' repository are ignored.
1352
1343
1353 'addfunc' is invoked with each unique pairs of filename and
1344 'addfunc' is invoked with each unique pairs of filename and
1354 largefile hash value.
1345 largefile hash value.
1355 """
1346 """
1356 knowns = set()
1347 knowns = set()
1357 lfhashes = set()
1348 lfhashes = set()
1358
1349
1359 def dedup(fn, lfhash):
1350 def dedup(fn, lfhash):
1360 k = (fn, lfhash)
1351 k = (fn, lfhash)
1361 if k not in knowns:
1352 if k not in knowns:
1362 knowns.add(k)
1353 knowns.add(k)
1363 lfhashes.add(lfhash)
1354 lfhashes.add(lfhash)
1364
1355
1365 lfutil.getlfilestoupload(repo, missing, dedup)
1356 lfutil.getlfilestoupload(repo, missing, dedup)
1366 if lfhashes:
1357 if lfhashes:
1367 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1358 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1368 for fn, lfhash in knowns:
1359 for fn, lfhash in knowns:
1369 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1360 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1370 addfunc(fn, lfhash)
1361 addfunc(fn, lfhash)
1371
1362
1372
1363
1373 def outgoinghook(ui, repo, other, opts, missing):
1364 def outgoinghook(ui, repo, other, opts, missing):
1374 if opts.pop(b'large', None):
1365 if opts.pop(b'large', None):
1375 lfhashes = set()
1366 lfhashes = set()
1376 if ui.debugflag:
1367 if ui.debugflag:
1377 toupload = {}
1368 toupload = {}
1378
1369
1379 def addfunc(fn, lfhash):
1370 def addfunc(fn, lfhash):
1380 if fn not in toupload:
1371 if fn not in toupload:
1381 toupload[fn] = []
1372 toupload[fn] = []
1382 toupload[fn].append(lfhash)
1373 toupload[fn].append(lfhash)
1383 lfhashes.add(lfhash)
1374 lfhashes.add(lfhash)
1384
1375
1385 def showhashes(fn):
1376 def showhashes(fn):
1386 for lfhash in sorted(toupload[fn]):
1377 for lfhash in sorted(toupload[fn]):
1387 ui.debug(b' %s\n' % lfhash)
1378 ui.debug(b' %s\n' % lfhash)
1388
1379
1389 else:
1380 else:
1390 toupload = set()
1381 toupload = set()
1391
1382
1392 def addfunc(fn, lfhash):
1383 def addfunc(fn, lfhash):
1393 toupload.add(fn)
1384 toupload.add(fn)
1394 lfhashes.add(lfhash)
1385 lfhashes.add(lfhash)
1395
1386
1396 def showhashes(fn):
1387 def showhashes(fn):
1397 pass
1388 pass
1398
1389
1399 _getoutgoings(repo, other, missing, addfunc)
1390 _getoutgoings(repo, other, missing, addfunc)
1400
1391
1401 if not toupload:
1392 if not toupload:
1402 ui.status(_(b'largefiles: no files to upload\n'))
1393 ui.status(_(b'largefiles: no files to upload\n'))
1403 else:
1394 else:
1404 ui.status(
1395 ui.status(
1405 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1396 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1406 )
1397 )
1407 for file in sorted(toupload):
1398 for file in sorted(toupload):
1408 ui.status(lfutil.splitstandin(file) + b'\n')
1399 ui.status(lfutil.splitstandin(file) + b'\n')
1409 showhashes(file)
1400 showhashes(file)
1410 ui.status(b'\n')
1401 ui.status(b'\n')
1411
1402
1412
1403
1413 @eh.wrapcommand(
1404 @eh.wrapcommand(
1414 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1405 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1415 )
1406 )
1416 def _outgoingcmd(orig, *args, **kwargs):
1407 def _outgoingcmd(orig, *args, **kwargs):
1417 # Nothing to do here other than add the extra help option- the hook above
1408 # Nothing to do here other than add the extra help option- the hook above
1418 # processes it.
1409 # processes it.
1419 return orig(*args, **kwargs)
1410 return orig(*args, **kwargs)
1420
1411
1421
1412
1422 def summaryremotehook(ui, repo, opts, changes):
1413 def summaryremotehook(ui, repo, opts, changes):
1423 largeopt = opts.get(b'large', False)
1414 largeopt = opts.get(b'large', False)
1424 if changes is None:
1415 if changes is None:
1425 if largeopt:
1416 if largeopt:
1426 return (False, True) # only outgoing check is needed
1417 return (False, True) # only outgoing check is needed
1427 else:
1418 else:
1428 return (False, False)
1419 return (False, False)
1429 elif largeopt:
1420 elif largeopt:
1430 url, branch, peer, outgoing = changes[1]
1421 url, branch, peer, outgoing = changes[1]
1431 if peer is None:
1422 if peer is None:
1432 # i18n: column positioning for "hg summary"
1423 # i18n: column positioning for "hg summary"
1433 ui.status(_(b'largefiles: (no remote repo)\n'))
1424 ui.status(_(b'largefiles: (no remote repo)\n'))
1434 return
1425 return
1435
1426
1436 toupload = set()
1427 toupload = set()
1437 lfhashes = set()
1428 lfhashes = set()
1438
1429
1439 def addfunc(fn, lfhash):
1430 def addfunc(fn, lfhash):
1440 toupload.add(fn)
1431 toupload.add(fn)
1441 lfhashes.add(lfhash)
1432 lfhashes.add(lfhash)
1442
1433
1443 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1434 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1444
1435
1445 if not toupload:
1436 if not toupload:
1446 # i18n: column positioning for "hg summary"
1437 # i18n: column positioning for "hg summary"
1447 ui.status(_(b'largefiles: (no files to upload)\n'))
1438 ui.status(_(b'largefiles: (no files to upload)\n'))
1448 else:
1439 else:
1449 # i18n: column positioning for "hg summary"
1440 # i18n: column positioning for "hg summary"
1450 ui.status(
1441 ui.status(
1451 _(b'largefiles: %d entities for %d files to upload\n')
1442 _(b'largefiles: %d entities for %d files to upload\n')
1452 % (len(lfhashes), len(toupload))
1443 % (len(lfhashes), len(toupload))
1453 )
1444 )
1454
1445
1455
1446
1456 @eh.wrapcommand(
1447 @eh.wrapcommand(
1457 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1448 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1458 )
1449 )
1459 def overridesummary(orig, ui, repo, *pats, **opts):
1450 def overridesummary(orig, ui, repo, *pats, **opts):
1460 with lfstatus(repo):
1451 with lfstatus(repo):
1461 orig(ui, repo, *pats, **opts)
1452 orig(ui, repo, *pats, **opts)
1462
1453
1463
1454
1464 @eh.wrapfunction(scmutil, b'addremove')
1455 @eh.wrapfunction(scmutil, b'addremove')
1465 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1456 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1466 if opts is None:
1457 if opts is None:
1467 opts = {}
1458 opts = {}
1468 if not lfutil.islfilesrepo(repo):
1459 if not lfutil.islfilesrepo(repo):
1469 return orig(repo, matcher, prefix, uipathfn, opts)
1460 return orig(repo, matcher, prefix, uipathfn, opts)
1470 # Get the list of missing largefiles so we can remove them
1461 # Get the list of missing largefiles so we can remove them
1471 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1462 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1472 unsure, s = lfdirstate.status(
1463 unsure, s = lfdirstate.status(
1473 matchmod.always(),
1464 matchmod.always(),
1474 subrepos=[],
1465 subrepos=[],
1475 ignored=False,
1466 ignored=False,
1476 clean=False,
1467 clean=False,
1477 unknown=False,
1468 unknown=False,
1478 )
1469 )
1479
1470
1480 # Call into the normal remove code, but the removing of the standin, we want
1471 # Call into the normal remove code, but the removing of the standin, we want
1481 # to have handled by original addremove. Monkey patching here makes sure
1472 # to have handled by original addremove. Monkey patching here makes sure
1482 # we don't remove the standin in the largefiles code, preventing a very
1473 # we don't remove the standin in the largefiles code, preventing a very
1483 # confused state later.
1474 # confused state later.
1484 if s.deleted:
1475 if s.deleted:
1485 m = copy.copy(matcher)
1476 m = copy.copy(matcher)
1486
1477
1487 # The m._files and m._map attributes are not changed to the deleted list
1478 # The m._files and m._map attributes are not changed to the deleted list
1488 # because that affects the m.exact() test, which in turn governs whether
1479 # because that affects the m.exact() test, which in turn governs whether
1489 # or not the file name is printed, and how. Simply limit the original
1480 # or not the file name is printed, and how. Simply limit the original
1490 # matches to those in the deleted status list.
1481 # matches to those in the deleted status list.
1491 matchfn = m.matchfn
1482 matchfn = m.matchfn
1492 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1483 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1493
1484
1494 removelargefiles(
1485 removelargefiles(
1495 repo.ui,
1486 repo.ui,
1496 repo,
1487 repo,
1497 True,
1488 True,
1498 m,
1489 m,
1499 uipathfn,
1490 uipathfn,
1500 opts.get(b'dry_run'),
1491 opts.get(b'dry_run'),
1501 **pycompat.strkwargs(opts)
1492 **pycompat.strkwargs(opts)
1502 )
1493 )
1503 # Call into the normal add code, and any files that *should* be added as
1494 # Call into the normal add code, and any files that *should* be added as
1504 # largefiles will be
1495 # largefiles will be
1505 added, bad = addlargefiles(
1496 added, bad = addlargefiles(
1506 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1497 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1507 )
1498 )
1508 # Now that we've handled largefiles, hand off to the original addremove
1499 # Now that we've handled largefiles, hand off to the original addremove
1509 # function to take care of the rest. Make sure it doesn't do anything with
1500 # function to take care of the rest. Make sure it doesn't do anything with
1510 # largefiles by passing a matcher that will ignore them.
1501 # largefiles by passing a matcher that will ignore them.
1511 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1502 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1512 return orig(repo, matcher, prefix, uipathfn, opts)
1503 return orig(repo, matcher, prefix, uipathfn, opts)
1513
1504
1514
1505
1515 # Calling purge with --all will cause the largefiles to be deleted.
1506 # Calling purge with --all will cause the largefiles to be deleted.
1516 # Override repo.status to prevent this from happening.
1507 # Override repo.status to prevent this from happening.
1517 @eh.wrapcommand(b'purge', extension=b'purge')
1508 @eh.wrapcommand(b'purge', extension=b'purge')
1518 def overridepurge(orig, ui, repo, *dirs, **opts):
1509 def overridepurge(orig, ui, repo, *dirs, **opts):
1519 # XXX Monkey patching a repoview will not work. The assigned attribute will
1510 # XXX Monkey patching a repoview will not work. The assigned attribute will
1520 # be set on the unfiltered repo, but we will only lookup attributes in the
1511 # be set on the unfiltered repo, but we will only lookup attributes in the
1521 # unfiltered repo if the lookup in the repoview object itself fails. As the
1512 # unfiltered repo if the lookup in the repoview object itself fails. As the
1522 # monkey patched method exists on the repoview class the lookup will not
1513 # monkey patched method exists on the repoview class the lookup will not
1523 # fail. As a result, the original version will shadow the monkey patched
1514 # fail. As a result, the original version will shadow the monkey patched
1524 # one, defeating the monkey patch.
1515 # one, defeating the monkey patch.
1525 #
1516 #
1526 # As a work around we use an unfiltered repo here. We should do something
1517 # As a work around we use an unfiltered repo here. We should do something
1527 # cleaner instead.
1518 # cleaner instead.
1528 repo = repo.unfiltered()
1519 repo = repo.unfiltered()
1529 oldstatus = repo.status
1520 oldstatus = repo.status
1530
1521
1531 def overridestatus(
1522 def overridestatus(
1532 node1=b'.',
1523 node1=b'.',
1533 node2=None,
1524 node2=None,
1534 match=None,
1525 match=None,
1535 ignored=False,
1526 ignored=False,
1536 clean=False,
1527 clean=False,
1537 unknown=False,
1528 unknown=False,
1538 listsubrepos=False,
1529 listsubrepos=False,
1539 ):
1530 ):
1540 r = oldstatus(
1531 r = oldstatus(
1541 node1, node2, match, ignored, clean, unknown, listsubrepos
1532 node1, node2, match, ignored, clean, unknown, listsubrepos
1542 )
1533 )
1543 lfdirstate = lfutil.openlfdirstate(ui, repo)
1534 lfdirstate = lfutil.openlfdirstate(ui, repo)
1544 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1535 unknown = [f for f in r.unknown if lfdirstate[f] == b'?']
1545 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1536 ignored = [f for f in r.ignored if lfdirstate[f] == b'?']
1546 return scmutil.status(
1537 return scmutil.status(
1547 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1538 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1548 )
1539 )
1549
1540
1550 repo.status = overridestatus
1541 repo.status = overridestatus
1551 orig(ui, repo, *dirs, **opts)
1542 orig(ui, repo, *dirs, **opts)
1552 repo.status = oldstatus
1543 repo.status = oldstatus
1553
1544
1554
1545
1555 @eh.wrapcommand(b'rollback')
1546 @eh.wrapcommand(b'rollback')
1556 def overriderollback(orig, ui, repo, **opts):
1547 def overriderollback(orig, ui, repo, **opts):
1557 with repo.wlock():
1548 with repo.wlock():
1558 before = repo.dirstate.parents()
1549 before = repo.dirstate.parents()
1559 orphans = set(
1550 orphans = set(
1560 f
1551 f
1561 for f in repo.dirstate
1552 for f in repo.dirstate
1562 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1553 if lfutil.isstandin(f) and repo.dirstate[f] != b'r'
1563 )
1554 )
1564 result = orig(ui, repo, **opts)
1555 result = orig(ui, repo, **opts)
1565 after = repo.dirstate.parents()
1556 after = repo.dirstate.parents()
1566 if before == after:
1557 if before == after:
1567 return result # no need to restore standins
1558 return result # no need to restore standins
1568
1559
1569 pctx = repo[b'.']
1560 pctx = repo[b'.']
1570 for f in repo.dirstate:
1561 for f in repo.dirstate:
1571 if lfutil.isstandin(f):
1562 if lfutil.isstandin(f):
1572 orphans.discard(f)
1563 orphans.discard(f)
1573 if repo.dirstate[f] == b'r':
1564 if repo.dirstate[f] == b'r':
1574 repo.wvfs.unlinkpath(f, ignoremissing=True)
1565 repo.wvfs.unlinkpath(f, ignoremissing=True)
1575 elif f in pctx:
1566 elif f in pctx:
1576 fctx = pctx[f]
1567 fctx = pctx[f]
1577 repo.wwrite(f, fctx.data(), fctx.flags())
1568 repo.wwrite(f, fctx.data(), fctx.flags())
1578 else:
1569 else:
1579 # content of standin is not so important in 'a',
1570 # content of standin is not so important in 'a',
1580 # 'm' or 'n' (coming from the 2nd parent) cases
1571 # 'm' or 'n' (coming from the 2nd parent) cases
1581 lfutil.writestandin(repo, f, b'', False)
1572 lfutil.writestandin(repo, f, b'', False)
1582 for standin in orphans:
1573 for standin in orphans:
1583 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1574 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1584
1575
1585 lfdirstate = lfutil.openlfdirstate(ui, repo)
1576 lfdirstate = lfutil.openlfdirstate(ui, repo)
1586 orphans = set(lfdirstate)
1577 orphans = set(lfdirstate)
1587 lfiles = lfutil.listlfiles(repo)
1578 lfiles = lfutil.listlfiles(repo)
1588 for file in lfiles:
1579 for file in lfiles:
1589 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1580 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1590 orphans.discard(file)
1581 orphans.discard(file)
1591 for lfile in orphans:
1582 for lfile in orphans:
1592 lfdirstate.drop(lfile)
1583 lfdirstate.drop(lfile)
1593 lfdirstate.write()
1584 lfdirstate.write()
1594 return result
1585 return result
1595
1586
1596
1587
1597 @eh.wrapcommand(b'transplant', extension=b'transplant')
1588 @eh.wrapcommand(b'transplant', extension=b'transplant')
1598 def overridetransplant(orig, ui, repo, *revs, **opts):
1589 def overridetransplant(orig, ui, repo, *revs, **opts):
1599 resuming = opts.get('continue')
1590 resuming = opts.get('continue')
1600 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1591 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1601 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1592 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1602 try:
1593 try:
1603 result = orig(ui, repo, *revs, **opts)
1594 result = orig(ui, repo, *revs, **opts)
1604 finally:
1595 finally:
1605 repo._lfstatuswriters.pop()
1596 repo._lfstatuswriters.pop()
1606 repo._lfcommithooks.pop()
1597 repo._lfcommithooks.pop()
1607 return result
1598 return result
1608
1599
1609
1600
1610 @eh.wrapcommand(b'cat')
1601 @eh.wrapcommand(b'cat')
1611 def overridecat(orig, ui, repo, file1, *pats, **opts):
1602 def overridecat(orig, ui, repo, file1, *pats, **opts):
1612 opts = pycompat.byteskwargs(opts)
1603 opts = pycompat.byteskwargs(opts)
1613 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1604 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1614 err = 1
1605 err = 1
1615 notbad = set()
1606 notbad = set()
1616 m = scmutil.match(ctx, (file1,) + pats, opts)
1607 m = scmutil.match(ctx, (file1,) + pats, opts)
1617 origmatchfn = m.matchfn
1608 origmatchfn = m.matchfn
1618
1609
1619 def lfmatchfn(f):
1610 def lfmatchfn(f):
1620 if origmatchfn(f):
1611 if origmatchfn(f):
1621 return True
1612 return True
1622 lf = lfutil.splitstandin(f)
1613 lf = lfutil.splitstandin(f)
1623 if lf is None:
1614 if lf is None:
1624 return False
1615 return False
1625 notbad.add(lf)
1616 notbad.add(lf)
1626 return origmatchfn(lf)
1617 return origmatchfn(lf)
1627
1618
1628 m.matchfn = lfmatchfn
1619 m.matchfn = lfmatchfn
1629 origbadfn = m.bad
1620 origbadfn = m.bad
1630
1621
1631 def lfbadfn(f, msg):
1622 def lfbadfn(f, msg):
1632 if not f in notbad:
1623 if not f in notbad:
1633 origbadfn(f, msg)
1624 origbadfn(f, msg)
1634
1625
1635 m.bad = lfbadfn
1626 m.bad = lfbadfn
1636
1627
1637 origvisitdirfn = m.visitdir
1628 origvisitdirfn = m.visitdir
1638
1629
1639 def lfvisitdirfn(dir):
1630 def lfvisitdirfn(dir):
1640 if dir == lfutil.shortname:
1631 if dir == lfutil.shortname:
1641 return True
1632 return True
1642 ret = origvisitdirfn(dir)
1633 ret = origvisitdirfn(dir)
1643 if ret:
1634 if ret:
1644 return ret
1635 return ret
1645 lf = lfutil.splitstandin(dir)
1636 lf = lfutil.splitstandin(dir)
1646 if lf is None:
1637 if lf is None:
1647 return False
1638 return False
1648 return origvisitdirfn(lf)
1639 return origvisitdirfn(lf)
1649
1640
1650 m.visitdir = lfvisitdirfn
1641 m.visitdir = lfvisitdirfn
1651
1642
1652 for f in ctx.walk(m):
1643 for f in ctx.walk(m):
1653 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1644 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1654 lf = lfutil.splitstandin(f)
1645 lf = lfutil.splitstandin(f)
1655 if lf is None or origmatchfn(f):
1646 if lf is None or origmatchfn(f):
1656 # duplicating unreachable code from commands.cat
1647 # duplicating unreachable code from commands.cat
1657 data = ctx[f].data()
1648 data = ctx[f].data()
1658 if opts.get(b'decode'):
1649 if opts.get(b'decode'):
1659 data = repo.wwritedata(f, data)
1650 data = repo.wwritedata(f, data)
1660 fp.write(data)
1651 fp.write(data)
1661 else:
1652 else:
1662 hash = lfutil.readasstandin(ctx[f])
1653 hash = lfutil.readasstandin(ctx[f])
1663 if not lfutil.inusercache(repo.ui, hash):
1654 if not lfutil.inusercache(repo.ui, hash):
1664 store = storefactory.openstore(repo)
1655 store = storefactory.openstore(repo)
1665 success, missing = store.get([(lf, hash)])
1656 success, missing = store.get([(lf, hash)])
1666 if len(success) != 1:
1657 if len(success) != 1:
1667 raise error.Abort(
1658 raise error.Abort(
1668 _(
1659 _(
1669 b'largefile %s is not in cache and could not be '
1660 b'largefile %s is not in cache and could not be '
1670 b'downloaded'
1661 b'downloaded'
1671 )
1662 )
1672 % lf
1663 % lf
1673 )
1664 )
1674 path = lfutil.usercachepath(repo.ui, hash)
1665 path = lfutil.usercachepath(repo.ui, hash)
1675 with open(path, b"rb") as fpin:
1666 with open(path, b"rb") as fpin:
1676 for chunk in util.filechunkiter(fpin):
1667 for chunk in util.filechunkiter(fpin):
1677 fp.write(chunk)
1668 fp.write(chunk)
1678 err = 0
1669 err = 0
1679 return err
1670 return err
1680
1671
1681
1672
1682 @eh.wrapfunction(merge, b'update')
1673 @eh.wrapfunction(merge, b'update')
1683 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1674 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1684 matcher = kwargs.get('matcher', None)
1675 matcher = kwargs.get('matcher', None)
1685 # note if this is a partial update
1676 # note if this is a partial update
1686 partial = matcher and not matcher.always()
1677 partial = matcher and not matcher.always()
1687 with repo.wlock():
1678 with repo.wlock():
1688 # branch | | |
1679 # branch | | |
1689 # merge | force | partial | action
1680 # merge | force | partial | action
1690 # -------+-------+---------+--------------
1681 # -------+-------+---------+--------------
1691 # x | x | x | linear-merge
1682 # x | x | x | linear-merge
1692 # o | x | x | branch-merge
1683 # o | x | x | branch-merge
1693 # x | o | x | overwrite (as clean update)
1684 # x | o | x | overwrite (as clean update)
1694 # o | o | x | force-branch-merge (*1)
1685 # o | o | x | force-branch-merge (*1)
1695 # x | x | o | (*)
1686 # x | x | o | (*)
1696 # o | x | o | (*)
1687 # o | x | o | (*)
1697 # x | o | o | overwrite (as revert)
1688 # x | o | o | overwrite (as revert)
1698 # o | o | o | (*)
1689 # o | o | o | (*)
1699 #
1690 #
1700 # (*) don't care
1691 # (*) don't care
1701 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1692 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1702
1693
1703 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1694 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1704 unsure, s = lfdirstate.status(
1695 unsure, s = lfdirstate.status(
1705 matchmod.always(),
1696 matchmod.always(),
1706 subrepos=[],
1697 subrepos=[],
1707 ignored=False,
1698 ignored=False,
1708 clean=True,
1699 clean=True,
1709 unknown=False,
1700 unknown=False,
1710 )
1701 )
1711 oldclean = set(s.clean)
1702 oldclean = set(s.clean)
1712 pctx = repo[b'.']
1703 pctx = repo[b'.']
1713 dctx = repo[node]
1704 dctx = repo[node]
1714 for lfile in unsure + s.modified:
1705 for lfile in unsure + s.modified:
1715 lfileabs = repo.wvfs.join(lfile)
1706 lfileabs = repo.wvfs.join(lfile)
1716 if not repo.wvfs.exists(lfileabs):
1707 if not repo.wvfs.exists(lfileabs):
1717 continue
1708 continue
1718 lfhash = lfutil.hashfile(lfileabs)
1709 lfhash = lfutil.hashfile(lfileabs)
1719 standin = lfutil.standin(lfile)
1710 standin = lfutil.standin(lfile)
1720 lfutil.writestandin(
1711 lfutil.writestandin(
1721 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1712 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1722 )
1713 )
1723 if standin in pctx and lfhash == lfutil.readasstandin(
1714 if standin in pctx and lfhash == lfutil.readasstandin(
1724 pctx[standin]
1715 pctx[standin]
1725 ):
1716 ):
1726 oldclean.add(lfile)
1717 oldclean.add(lfile)
1727 for lfile in s.added:
1718 for lfile in s.added:
1728 fstandin = lfutil.standin(lfile)
1719 fstandin = lfutil.standin(lfile)
1729 if fstandin not in dctx:
1720 if fstandin not in dctx:
1730 # in this case, content of standin file is meaningless
1721 # in this case, content of standin file is meaningless
1731 # (in dctx, lfile is unknown, or normal file)
1722 # (in dctx, lfile is unknown, or normal file)
1732 continue
1723 continue
1733 lfutil.updatestandin(repo, lfile, fstandin)
1724 lfutil.updatestandin(repo, lfile, fstandin)
1734 # mark all clean largefiles as dirty, just in case the update gets
1725 # mark all clean largefiles as dirty, just in case the update gets
1735 # interrupted before largefiles and lfdirstate are synchronized
1726 # interrupted before largefiles and lfdirstate are synchronized
1736 for lfile in oldclean:
1727 for lfile in oldclean:
1737 lfdirstate.normallookup(lfile)
1728 lfdirstate.normallookup(lfile)
1738 lfdirstate.write()
1729 lfdirstate.write()
1739
1730
1740 oldstandins = lfutil.getstandinsstate(repo)
1731 oldstandins = lfutil.getstandinsstate(repo)
1741 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1732 # Make sure the merge runs on disk, not in-memory. largefiles is not a
1742 # good candidate for in-memory merge (large files, custom dirstate,
1733 # good candidate for in-memory merge (large files, custom dirstate,
1743 # matcher usage).
1734 # matcher usage).
1744 kwargs['wc'] = repo[None]
1735 kwargs['wc'] = repo[None]
1745 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1736 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1746
1737
1747 newstandins = lfutil.getstandinsstate(repo)
1738 newstandins = lfutil.getstandinsstate(repo)
1748 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1739 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1749
1740
1750 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1741 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1751 # all the ones that didn't change as clean
1742 # all the ones that didn't change as clean
1752 for lfile in oldclean.difference(filelist):
1743 for lfile in oldclean.difference(filelist):
1753 lfdirstate.normal(lfile)
1744 lfdirstate.normal(lfile)
1754 lfdirstate.write()
1745 lfdirstate.write()
1755
1746
1756 if branchmerge or force or partial:
1747 if branchmerge or force or partial:
1757 filelist.extend(s.deleted + s.removed)
1748 filelist.extend(s.deleted + s.removed)
1758
1749
1759 lfcommands.updatelfiles(
1750 lfcommands.updatelfiles(
1760 repo.ui, repo, filelist=filelist, normallookup=partial
1751 repo.ui, repo, filelist=filelist, normallookup=partial
1761 )
1752 )
1762
1753
1763 return result
1754 return result
1764
1755
1765
1756
1766 @eh.wrapfunction(scmutil, b'marktouched')
1757 @eh.wrapfunction(scmutil, b'marktouched')
1767 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1758 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1768 result = orig(repo, files, *args, **kwargs)
1759 result = orig(repo, files, *args, **kwargs)
1769
1760
1770 filelist = []
1761 filelist = []
1771 for f in files:
1762 for f in files:
1772 lf = lfutil.splitstandin(f)
1763 lf = lfutil.splitstandin(f)
1773 if lf is not None:
1764 if lf is not None:
1774 filelist.append(lf)
1765 filelist.append(lf)
1775 if filelist:
1766 if filelist:
1776 lfcommands.updatelfiles(
1767 lfcommands.updatelfiles(
1777 repo.ui,
1768 repo.ui,
1778 repo,
1769 repo,
1779 filelist=filelist,
1770 filelist=filelist,
1780 printmessage=False,
1771 printmessage=False,
1781 normallookup=True,
1772 normallookup=True,
1782 )
1773 )
1783
1774
1784 return result
1775 return result
1785
1776
1786
1777
1787 @eh.wrapfunction(upgrade, b'preservedrequirements')
1778 @eh.wrapfunction(upgrade, b'preservedrequirements')
1788 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1779 @eh.wrapfunction(upgrade, b'supporteddestrequirements')
1789 def upgraderequirements(orig, repo):
1780 def upgraderequirements(orig, repo):
1790 reqs = orig(repo)
1781 reqs = orig(repo)
1791 if b'largefiles' in repo.requirements:
1782 if b'largefiles' in repo.requirements:
1792 reqs.add(b'largefiles')
1783 reqs.add(b'largefiles')
1793 return reqs
1784 return reqs
1794
1785
1795
1786
1796 _lfscheme = b'largefile://'
1787 _lfscheme = b'largefile://'
1797
1788
1798
1789
1799 @eh.wrapfunction(urlmod, b'open')
1790 @eh.wrapfunction(urlmod, b'open')
1800 def openlargefile(orig, ui, url_, data=None):
1791 def openlargefile(orig, ui, url_, data=None):
1801 if url_.startswith(_lfscheme):
1792 if url_.startswith(_lfscheme):
1802 if data:
1793 if data:
1803 msg = b"cannot use data on a 'largefile://' url"
1794 msg = b"cannot use data on a 'largefile://' url"
1804 raise error.ProgrammingError(msg)
1795 raise error.ProgrammingError(msg)
1805 lfid = url_[len(_lfscheme) :]
1796 lfid = url_[len(_lfscheme) :]
1806 return storefactory.getlfile(ui, lfid)
1797 return storefactory.getlfile(ui, lfid)
1807 else:
1798 else:
1808 return orig(ui, url_, data=data)
1799 return orig(ui, url_, data=data)
General Comments 0
You need to be logged in to leave comments. Login now