largefiles: only cache largefiles in new heads...
Na'Tosha Bard
r16103:3e1efb45 stable
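This changeset adds a getcurrentheads() helper to the largefiles utility module (first hunk) and uses it in override_pull (second hunk) so that, after a plain pull, largefiles are cached only for the heads that the pull introduced, instead of for every branch head in the repository. The set arithmetic behind the change is simple; the snippet below is a standalone illustration with made-up head identifiers (the real code works with binary changeset nodes, as shown in the override_pull hunk further down):

    # Standalone illustration only -- the identifiers below are hypothetical.
    oldheads = {"a1b2c3", "d4e5f6"}          # heads recorded before the pull
    heads = {"a1b2c3", "d4e5f6", "0a1b2c"}   # heads present after the pull
    newheads = set(heads).difference(set(oldheads))
    assert newheads == {"0a1b2c"}            # only these need largefile caching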
@@ -1,451 +1,459 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16 import tempfile
17 17
18 18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 19 from mercurial.i18n import _
20 20
21 21 shortname = '.hglf'
22 22 longname = 'largefiles'
23 23
24 24
25 25 # -- Portability wrappers ----------------------------------------------
26 26
27 27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
28 28 return dirstate.walk(matcher, [], unknown, ignored)
29 29
30 30 def repo_add(repo, list):
31 31 add = repo[None].add
32 32 return add(list)
33 33
34 34 def repo_remove(repo, list, unlink=False):
35 35 def remove(list, unlink):
36 36 wlock = repo.wlock()
37 37 try:
38 38 if unlink:
39 39 for f in list:
40 40 try:
41 41 util.unlinkpath(repo.wjoin(f))
42 42 except OSError, inst:
43 43 if inst.errno != errno.ENOENT:
44 44 raise
45 45 repo[None].forget(list)
46 46 finally:
47 47 wlock.release()
48 48 return remove(list, unlink=unlink)
49 49
50 50 def repo_forget(repo, list):
51 51 forget = repo[None].forget
52 52 return forget(list)
53 53
54 54 def findoutgoing(repo, remote, force):
55 55 from mercurial import discovery
56 56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
57 57 remote, force=force)
58 58 return repo.changelog.findmissing(common)
59 59
60 60 # -- Private worker functions ------------------------------------------
61 61
62 62 def getminsize(ui, assumelfiles, opt, default=10):
63 63 lfsize = opt
64 64 if not lfsize and assumelfiles:
65 65 lfsize = ui.config(longname, 'minsize', default=default)
66 66 if lfsize:
67 67 try:
68 68 lfsize = float(lfsize)
69 69 except ValueError:
70 70 raise util.Abort(_('largefiles: size must be number (not %s)\n')
71 71 % lfsize)
72 72 if lfsize is None:
73 73 raise util.Abort(_('minimum size for largefiles must be specified'))
74 74 return lfsize
75 75
76 76 def link(src, dest):
77 77 try:
78 78 util.oslink(src, dest)
79 79 except OSError:
80 80 # if hardlinks fail, fallback on atomic copy
81 81 dst = util.atomictempfile(dest)
82 82 for chunk in util.filechunkiter(open(src, 'rb')):
83 83 dst.write(chunk)
84 84 dst.close()
85 85 os.chmod(dest, os.stat(src).st_mode)
86 86
87 87 def usercachepath(ui, hash):
88 88 path = ui.configpath(longname, 'usercache', None)
89 89 if path:
90 90 path = os.path.join(path, hash)
91 91 else:
92 92 if os.name == 'nt':
93 93 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
94 94 if appdata:
95 95 path = os.path.join(appdata, longname, hash)
96 96 elif platform.system() == 'Darwin':
97 97 home = os.getenv('HOME')
98 98 if home:
99 99 path = os.path.join(home, 'Library', 'Caches',
100 100 longname, hash)
101 101 elif os.name == 'posix':
102 102 path = os.getenv('XDG_CACHE_HOME')
103 103 if path:
104 104 path = os.path.join(path, longname, hash)
105 105 else:
106 106 home = os.getenv('HOME')
107 107 if home:
108 108 path = os.path.join(home, '.cache', longname, hash)
109 109 else:
110 110 raise util.Abort(_('unknown operating system: %s\n') % os.name)
111 111 return path
112 112
113 113 def inusercache(ui, hash):
114 114 path = usercachepath(ui, hash)
115 115 return path and os.path.exists(path)
116 116
117 117 def findfile(repo, hash):
118 118 if instore(repo, hash):
119 119 repo.ui.note(_('Found %s in store\n') % hash)
120 120 return storepath(repo, hash)
121 121 elif inusercache(repo.ui, hash):
122 122 repo.ui.note(_('Found %s in system cache\n') % hash)
123 123 path = storepath(repo, hash)
124 124 util.makedirs(os.path.dirname(path))
125 125 link(usercachepath(repo.ui, hash), path)
126 126 return path
127 127 return None
128 128
129 129 class largefiles_dirstate(dirstate.dirstate):
130 130 def __getitem__(self, key):
131 131 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
132 132 def normal(self, f):
133 133 return super(largefiles_dirstate, self).normal(unixpath(f))
134 134 def remove(self, f):
135 135 return super(largefiles_dirstate, self).remove(unixpath(f))
136 136 def add(self, f):
137 137 return super(largefiles_dirstate, self).add(unixpath(f))
138 138 def drop(self, f):
139 139 return super(largefiles_dirstate, self).drop(unixpath(f))
140 140 def forget(self, f):
141 141 return super(largefiles_dirstate, self).forget(unixpath(f))
142 142 def normallookup(self, f):
143 143 return super(largefiles_dirstate, self).normallookup(unixpath(f))
144 144
145 145 def openlfdirstate(ui, repo):
146 146 '''
147 147 Return a dirstate object that tracks largefiles: i.e. its root is
148 148 the repo root, but it is saved in .hg/largefiles/dirstate.
149 149 '''
150 150 admin = repo.join(longname)
151 151 opener = scmutil.opener(admin)
152 152 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
153 153 repo.dirstate._validate)
154 154
155 155 # If the largefiles dirstate does not exist, populate and create
156 156 # it. This ensures that we create it on the first meaningful
157 157 # largefiles operation in a new clone.
158 158 if not os.path.exists(os.path.join(admin, 'dirstate')):
159 159 util.makedirs(admin)
160 160 matcher = getstandinmatcher(repo)
161 161 for standin in dirstate_walk(repo.dirstate, matcher):
162 162 lfile = splitstandin(standin)
163 163 hash = readstandin(repo, lfile)
164 164 lfdirstate.normallookup(lfile)
165 165 try:
166 166 if hash == hashfile(repo.wjoin(lfile)):
167 167 lfdirstate.normal(lfile)
168 168 except OSError, err:
169 169 if err.errno != errno.ENOENT:
170 170 raise
171 171 return lfdirstate
172 172
173 173 def lfdirstate_status(lfdirstate, repo, rev):
174 174 match = match_.always(repo.root, repo.getcwd())
175 175 s = lfdirstate.status(match, [], False, False, False)
176 176 unsure, modified, added, removed, missing, unknown, ignored, clean = s
177 177 for lfile in unsure:
178 178 if repo[rev][standin(lfile)].data().strip() != \
179 179 hashfile(repo.wjoin(lfile)):
180 180 modified.append(lfile)
181 181 else:
182 182 clean.append(lfile)
183 183 lfdirstate.normal(lfile)
184 184 return (modified, added, removed, missing, unknown, ignored, clean)
185 185
186 186 def listlfiles(repo, rev=None, matcher=None):
187 187 '''return a list of largefiles in the working copy or the
188 188 specified changeset'''
189 189
190 190 if matcher is None:
191 191 matcher = getstandinmatcher(repo)
192 192
193 193 # ignore unknown files in working directory
194 194 return [splitstandin(f)
195 195 for f in repo[rev].walk(matcher)
196 196 if rev is not None or repo.dirstate[f] != '?']
197 197
198 198 def instore(repo, hash):
199 199 return os.path.exists(storepath(repo, hash))
200 200
201 201 def storepath(repo, hash):
202 202 return repo.join(os.path.join(longname, hash))
203 203
204 204 def copyfromcache(repo, hash, filename):
205 205 '''Copy the specified largefile from the repo or system cache to
206 206 filename in the repository. Return true on success or false if the
207 207 file was not found in either cache (which should not happen:
208 208 this is meant to be called only after ensuring that the needed
209 209 largefile exists in the cache).'''
210 210 path = findfile(repo, hash)
211 211 if path is None:
212 212 return False
213 213 util.makedirs(os.path.dirname(repo.wjoin(filename)))
214 214 # The write may fail before the file is fully written, but we
215 215 # don't use atomic writes in the working copy.
216 216 shutil.copy(path, repo.wjoin(filename))
217 217 return True
218 218
219 219 def copytostore(repo, rev, file, uploaded=False):
220 220 hash = readstandin(repo, file)
221 221 if instore(repo, hash):
222 222 return
223 223 copytostoreabsolute(repo, repo.wjoin(file), hash)
224 224
225 225 def copyalltostore(repo, node):
226 226 '''Copy all largefiles in a given revision to the store'''
227 227
228 228 ctx = repo[node]
229 229 for filename in ctx.files():
230 230 if isstandin(filename) and filename in ctx.manifest():
231 231 realfile = splitstandin(filename)
232 232 copytostore(repo, ctx.node(), realfile)
233 233
234 234
235 235 def copytostoreabsolute(repo, file, hash):
236 236 util.makedirs(os.path.dirname(storepath(repo, hash)))
237 237 if inusercache(repo.ui, hash):
238 238 link(usercachepath(repo.ui, hash), storepath(repo, hash))
239 239 else:
240 240 dst = util.atomictempfile(storepath(repo, hash))
241 241 for chunk in util.filechunkiter(open(file, 'rb')):
242 242 dst.write(chunk)
243 243 dst.close()
244 244 util.copymode(file, storepath(repo, hash))
245 245 linktousercache(repo, hash)
246 246
247 247 def linktousercache(repo, hash):
248 248 path = usercachepath(repo.ui, hash)
249 249 if path:
250 250 util.makedirs(os.path.dirname(path))
251 251 link(storepath(repo, hash), path)
252 252
253 253 def getstandinmatcher(repo, pats=[], opts={}):
254 254 '''Return a match object that applies pats to the standin directory'''
255 255 standindir = repo.pathto(shortname)
256 256 if pats:
257 257 # patterns supplied: search standin directory relative to current dir
258 258 cwd = repo.getcwd()
259 259 if os.path.isabs(cwd):
260 260 # cwd is an absolute path for hg -R <reponame>
261 261 # work relative to the repository root in this case
262 262 cwd = ''
263 263 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
264 264 elif os.path.isdir(standindir):
265 265 # no patterns: relative to repo root
266 266 pats = [standindir]
267 267 else:
268 268 # no patterns and no standin dir: return matcher that matches nothing
269 269 match = match_.match(repo.root, None, [], exact=True)
270 270 match.matchfn = lambda f: False
271 271 return match
272 272 return getmatcher(repo, pats, opts, showbad=False)
273 273
274 274 def getmatcher(repo, pats=[], opts={}, showbad=True):
275 275 '''Wrapper around scmutil.match() that adds showbad: if false,
276 276 neuter the match object's bad() method so it does not print any
277 277 warnings about missing files or directories.'''
278 278 match = scmutil.match(repo[None], pats, opts)
279 279
280 280 if not showbad:
281 281 match.bad = lambda f, msg: None
282 282 return match
283 283
284 284 def composestandinmatcher(repo, rmatcher):
285 285 '''Return a matcher that accepts standins corresponding to the
286 286 files accepted by rmatcher. Pass the list of files in the matcher
287 287 as the paths specified by the user.'''
288 288 smatcher = getstandinmatcher(repo, rmatcher.files())
289 289 isstandin = smatcher.matchfn
290 290 def composed_matchfn(f):
291 291 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
292 292 smatcher.matchfn = composed_matchfn
293 293
294 294 return smatcher
295 295
296 296 def standin(filename):
297 297 '''Return the repo-relative path to the standin for the specified big
298 298 file.'''
299 299 # Notes:
300 300 # 1) Most callers want an absolute path, but _create_standin() needs
301 301 # it repo-relative so lfadd() can pass it to repo_add(). So leave
302 302 # it up to the caller to use repo.wjoin() to get an absolute path.
303 303 # 2) Join with '/' because that's what dirstate always uses, even on
304 304 # Windows. Change existing separator to '/' first in case we are
305 305 # passed filenames from an external source (like the command line).
306 306 return shortname + '/' + util.pconvert(filename)
307 307
308 308 def isstandin(filename):
309 309 '''Return true if filename is a big file standin. filename must be
310 310 in Mercurial's internal form (slash-separated).'''
311 311 return filename.startswith(shortname + '/')
312 312
313 313 def splitstandin(filename):
314 314 # Split on / because that's what dirstate always uses, even on Windows.
315 315 # Change local separator to / first just in case we are passed filenames
316 316 # from an external source (like the command line).
317 317 bits = util.pconvert(filename).split('/', 1)
318 318 if len(bits) == 2 and bits[0] == shortname:
319 319 return bits[1]
320 320 else:
321 321 return None
322 322
323 323 def updatestandin(repo, standin):
324 324 file = repo.wjoin(splitstandin(standin))
325 325 if os.path.exists(file):
326 326 hash = hashfile(file)
327 327 executable = getexecutable(file)
328 328 writestandin(repo, standin, hash, executable)
329 329
330 330 def readstandin(repo, filename, node=None):
331 331 '''read hex hash from standin for filename at given node, or working
332 332 directory if no node is given'''
333 333 return repo[node][standin(filename)].data().strip()
334 334
335 335 def writestandin(repo, standin, hash, executable):
336 336 '''write hash to <repo.root>/<standin>'''
337 337 writehash(hash, repo.wjoin(standin), executable)
338 338
339 339 def copyandhash(instream, outfile):
340 340 '''Read bytes from instream (iterable) and write them to outfile,
341 341 computing the SHA-1 hash of the data along the way. Close outfile
342 342 when done and return the binary hash.'''
343 343 hasher = util.sha1('')
344 344 for data in instream:
345 345 hasher.update(data)
346 346 outfile.write(data)
347 347
348 348 # Blecch: closing a file that somebody else opened is rude and
349 349 # wrong. But it's so darn convenient and practical! After all,
350 350 # outfile was opened just to copy and hash.
351 351 outfile.close()
352 352
353 353 return hasher.digest()
354 354
355 355 def hashrepofile(repo, file):
356 356 return hashfile(repo.wjoin(file))
357 357
358 358 def hashfile(file):
359 359 if not os.path.exists(file):
360 360 return ''
361 361 hasher = util.sha1('')
362 362 fd = open(file, 'rb')
363 363 for data in blockstream(fd):
364 364 hasher.update(data)
365 365 fd.close()
366 366 return hasher.hexdigest()
367 367
368 368 class limitreader(object):
369 369 def __init__(self, f, limit):
370 370 self.f = f
371 371 self.limit = limit
372 372
373 373 def read(self, length):
374 374 if self.limit == 0:
375 375 return ''
376 376 length = length > self.limit and self.limit or length
377 377 self.limit -= length
378 378 return self.f.read(length)
379 379
380 380 def close(self):
381 381 pass
382 382
383 383 def blockstream(infile, blocksize=128 * 1024):
384 384 """Generator that yields blocks of data from infile and closes infile."""
385 385 while True:
386 386 data = infile.read(blocksize)
387 387 if not data:
388 388 break
389 389 yield data
390 390 # same blecch as copyandhash() above
391 391 infile.close()
392 392
393 393 def writehash(hash, filename, executable):
394 394 util.makedirs(os.path.dirname(filename))
395 395 util.writefile(filename, hash + '\n')
396 396 os.chmod(filename, getmode(executable))
397 397
398 398 def getexecutable(filename):
399 399 mode = os.stat(filename).st_mode
400 400 return ((mode & stat.S_IXUSR) and
401 401 (mode & stat.S_IXGRP) and
402 402 (mode & stat.S_IXOTH))
403 403
404 404 def getmode(executable):
405 405 if executable:
406 406 return 0755
407 407 else:
408 408 return 0644
409 409
410 410 def urljoin(first, second, *arg):
411 411 def join(left, right):
412 412 if not left.endswith('/'):
413 413 left += '/'
414 414 if right.startswith('/'):
415 415 right = right[1:]
416 416 return left + right
417 417
418 418 url = join(first, second)
419 419 for a in arg:
420 420 url = join(url, a)
421 421 return url
422 422
423 423 def hexsha1(data):
424 424 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
425 425 object data"""
426 426 h = util.sha1()
427 427 for chunk in util.filechunkiter(data):
428 428 h.update(chunk)
429 429 return h.hexdigest()
430 430
431 431 def httpsendfile(ui, filename):
432 432 return httpconnection.httpsendfile(ui, filename, 'rb')
433 433
434 434 def unixpath(path):
435 435 '''Return a version of path normalized for use with the lfdirstate.'''
436 436 return util.pconvert(os.path.normpath(path))
437 437
438 438 def islfilesrepo(repo):
439 439 return ('largefiles' in repo.requirements and
440 440 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
441 441
442 442 def mkstemp(repo, prefix):
443 443 '''Returns a file descriptor and a filename corresponding to a temporary
444 444 file in the repo's largefiles store.'''
445 445 path = repo.join(longname)
446 446 util.makedirs(path)
447 447 return tempfile.mkstemp(prefix=prefix, dir=path)
448 448
449 449 class storeprotonotcapable(Exception):
450 450 def __init__(self, storetypes):
451 451 self.storetypes = storetypes
452
453 def getcurrentheads(repo):
454 branches = repo.branchmap()
455 heads = []
456 for branch in branches:
457 newheads = repo.branchheads(branch)
458 heads = heads + newheads
459 return heads
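The new getcurrentheads() helper concatenates repo.branchheads(branch) for every named branch in repo.branchmap(), yielding a flat list of head nodes. Callers can snapshot that list before and after an operation to see which heads the operation introduced; a rough sketch of such a caller follows, assuming only the helper defined above (the function name and structure here are illustrative, not part of the extension):

    import lfutil  # the utility module from the first hunk

    def headsaddedby(repo, operation):
        '''Hypothetical helper (not part of the extension): run operation()
        and return the set of branch heads it introduced.'''
        before = set(lfutil.getcurrentheads(repo))
        operation()
        return set(lfutil.getcurrentheads(repo)).difference(before)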
@@ -1,964 +1,964 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 27 largefiles'''
28 28 oldmatch = None # for the closure
29 29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 30 default='relpath'):
31 31 match = oldmatch(ctx, pats, opts, globbed, default)
32 32 m = copy.copy(match)
33 33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 34 manifest)
35 35 m._files = filter(notlfile, m._files)
36 36 m._fmap = set(m._files)
37 37 orig_matchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(override_match)
41 41
42 42 def installmatchfn(f):
43 43 oldmatch = scmutil.match
44 44 setattr(f, 'oldmatch', oldmatch)
45 45 scmutil.match = f
46 46 return oldmatch
47 47
48 48 def restorematchfn():
49 49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 50 was called. no-op if scmutil.match is its original function.
51 51
52 52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 53 restore matchfn to reverse'''
54 54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55 55
56 56 def add_largefiles(ui, repo, *pats, **opts):
57 57 large = opts.pop('large', None)
58 58 lfsize = lfutil.getminsize(
59 59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60 60
61 61 lfmatcher = None
62 62 if lfutil.islfilesrepo(repo):
63 63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 64 if lfpats:
65 65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66 66
67 67 lfnames = []
68 68 m = scmutil.match(repo[None], pats, opts)
69 69 m.bad = lambda x, y: None
70 70 wctx = repo[None]
71 71 for f in repo.walk(m):
72 72 exact = m.exact(f)
73 73 lfile = lfutil.standin(f) in wctx
74 74 nfile = f in wctx
75 75 exists = lfile or nfile
76 76
77 77 # Don't warn the user when they attempt to add a normal tracked file.
78 78 # The normal add code will do that for us.
79 79 if exact and exists:
80 80 if lfile:
81 81 ui.warn(_('%s already a largefile\n') % f)
82 82 continue
83 83
84 84 if exact or not exists:
85 85 abovemin = (lfsize and
86 86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 88 lfnames.append(f)
89 89 if ui.verbose or not exact:
90 90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91 91
92 92 bad = []
93 93 standins = []
94 94
95 95 # Need to lock, otherwise there could be a race condition between
96 96 # when standins are created and added to the repo.
97 97 wlock = repo.wlock()
98 98 try:
99 99 if not opts.get('dry_run'):
100 100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 101 for f in lfnames:
102 102 standinname = lfutil.standin(f)
103 103 lfutil.writestandin(repo, standinname, hash='',
104 104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 105 standins.append(standinname)
106 106 if lfdirstate[f] == 'r':
107 107 lfdirstate.normallookup(f)
108 108 else:
109 109 lfdirstate.add(f)
110 110 lfdirstate.write()
111 111 bad += [lfutil.splitstandin(f)
112 112 for f in lfutil.repo_add(repo, standins)
113 113 if f in m.files()]
114 114 finally:
115 115 wlock.release()
116 116 return bad
117 117
118 118 def remove_largefiles(ui, repo, *pats, **opts):
119 119 after = opts.get('after')
120 120 if not pats and not after:
121 121 raise util.Abort(_('no files specified'))
122 122 m = scmutil.match(repo[None], pats, opts)
123 123 try:
124 124 repo.lfstatus = True
125 125 s = repo.status(match=m, clean=True)
126 126 finally:
127 127 repo.lfstatus = False
128 128 manifest = repo[None].manifest()
129 129 modified, added, deleted, clean = [[f for f in list
130 130 if lfutil.standin(f) in manifest]
131 131 for list in [s[0], s[1], s[3], s[6]]]
132 132
133 133 def warn(files, reason):
134 134 for f in files:
135 135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 136 % (m.rel(f), reason))
137 137
138 138 if after:
139 139 remove, forget = deleted, []
140 140 warn(modified + added + clean, _('file still exists'))
141 141 else:
142 142 remove, forget = deleted + clean, []
143 143 warn(modified, _('file is modified'))
144 144 warn(added, _('file has been marked for add'))
145 145
146 146 for f in sorted(remove + forget):
147 147 if ui.verbose or not m.exact(f):
148 148 ui.status(_('removing %s\n') % m.rel(f))
149 149
150 150 # Need to lock because standin files are deleted then removed from the
151 151 # repository and we could race in between.
152 152 wlock = repo.wlock()
153 153 try:
154 154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 155 for f in remove:
156 156 if not after:
157 157 # If this is being called by addremove, notify the user that we
158 158 # are removing the file.
159 159 if getattr(repo, "_isaddremove", False):
160 160 ui.status(_('removing %s\n') % f)
161 161 if os.path.exists(repo.wjoin(f)):
162 162 util.unlinkpath(repo.wjoin(f))
163 163 lfdirstate.remove(f)
164 164 lfdirstate.write()
165 165 forget = [lfutil.standin(f) for f in forget]
166 166 remove = [lfutil.standin(f) for f in remove]
167 167 lfutil.repo_forget(repo, forget)
168 168 # If this is being called by addremove, let the original addremove
169 169 # function handle this.
170 170 if not getattr(repo, "_isaddremove", False):
171 171 lfutil.repo_remove(repo, remove, unlink=True)
172 172 finally:
173 173 wlock.release()
174 174
175 175 # -- Wrappers: modify existing commands --------------------------------
176 176
177 177 # Add works by going through the files that the user wanted to add and
178 178 # checking if they should be added as largefiles. Then it makes a new
179 179 # matcher which matches only the normal files and runs the original
180 180 # version of add.
181 181 def override_add(orig, ui, repo, *pats, **opts):
182 182 normal = opts.pop('normal')
183 183 if normal:
184 184 if opts.get('large'):
185 185 raise util.Abort(_('--normal cannot be used with --large'))
186 186 return orig(ui, repo, *pats, **opts)
187 187 bad = add_largefiles(ui, repo, *pats, **opts)
188 188 installnormalfilesmatchfn(repo[None].manifest())
189 189 result = orig(ui, repo, *pats, **opts)
190 190 restorematchfn()
191 191
192 192 return (result == 1 or bad) and 1 or 0
193 193
194 194 def override_remove(orig, ui, repo, *pats, **opts):
195 195 installnormalfilesmatchfn(repo[None].manifest())
196 196 orig(ui, repo, *pats, **opts)
197 197 restorematchfn()
198 198 remove_largefiles(ui, repo, *pats, **opts)
199 199
200 200 def override_status(orig, ui, repo, *pats, **opts):
201 201 try:
202 202 repo.lfstatus = True
203 203 return orig(ui, repo, *pats, **opts)
204 204 finally:
205 205 repo.lfstatus = False
206 206
207 207 def override_log(orig, ui, repo, *pats, **opts):
208 208 try:
209 209 repo.lfstatus = True
210 210 orig(ui, repo, *pats, **opts)
211 211 finally:
212 212 repo.lfstatus = False
213 213
214 214 def override_verify(orig, ui, repo, *pats, **opts):
215 215 large = opts.pop('large', False)
216 216 all = opts.pop('lfa', False)
217 217 contents = opts.pop('lfc', False)
218 218
219 219 result = orig(ui, repo, *pats, **opts)
220 220 if large:
221 221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 222 return result
223 223
224 224 # Override needs to refresh standins so that update's normal merge
225 225 # will go through properly. Then the other update hook (overriding repo.update)
226 226 # will get the new files. Filemerge is also overridden so that the merge
227 227 # will merge standins correctly.
228 228 def override_update(orig, ui, repo, *pats, **opts):
229 229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 231 False, False)
232 232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233 233
234 234 # Need to lock between the standins getting updated and their
235 235 # largefiles getting updated
236 236 wlock = repo.wlock()
237 237 try:
238 238 if opts['check']:
239 239 mod = len(modified) > 0
240 240 for lfile in unsure:
241 241 standin = lfutil.standin(lfile)
242 242 if repo['.'][standin].data().strip() != \
243 243 lfutil.hashfile(repo.wjoin(lfile)):
244 244 mod = True
245 245 else:
246 246 lfdirstate.normal(lfile)
247 247 lfdirstate.write()
248 248 if mod:
249 249 raise util.Abort(_('uncommitted local changes'))
250 250 # XXX handle removed differently
251 251 if not opts['clean']:
252 252 for lfile in unsure + modified + added:
253 253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 254 finally:
255 255 wlock.release()
256 256 return orig(ui, repo, *pats, **opts)
257 257
258 258 # Before starting the manifest merge, merge.updates will call
259 259 # _checkunknown to check if there are any files in the merged-in
260 260 # changeset that collide with unknown files in the working copy.
261 261 #
262 262 # The largefiles are seen as unknown, so this prevents us from merging
263 263 # in a file 'foo' if we already have a largefile with the same name.
264 264 #
265 265 # The overridden function filters the unknown files by removing any
266 266 # largefiles. This makes the merge proceed and we can then handle this
267 267 # case further in the overridden manifestmerge function below.
268 268 def override_checkunknown(origfn, wctx, mctx, folding):
269 269 origunknown = wctx.unknown()
270 270 wctx._unknown = filter(lambda f: lfutil.standin(f) not in wctx, origunknown)
271 271 try:
272 272 return origfn(wctx, mctx, folding)
273 273 finally:
274 274 wctx._unknown = origunknown
275 275
276 276 # The manifest merge handles conflicts on the manifest level. We want
277 277 # to handle changes in largefile-ness of files at this level too.
278 278 #
279 279 # The strategy is to run the original manifestmerge and then process
280 280 # the action list it outputs. There are two cases we need to deal with:
281 281 #
282 282 # 1. Normal file in p1, largefile in p2. Here the largefile is
283 283 # detected via its standin file, which will enter the working copy
284 284 # with a "get" action. It is not "merge" since the standin is all
285 285 # Mercurial is concerned with at this level -- the link to the
286 286 # existing normal file is not relevant here.
287 287 #
288 288 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
289 289 # since the largefile will be present in the working copy and
290 290 # different from the normal file in p2. Mercurial therefore
291 291 # triggers a merge action.
292 292 #
293 293 # In both cases, we prompt the user and emit new actions to either
294 294 # remove the standin (if the normal file was kept) or to remove the
295 295 # normal file and get the standin (if the largefile was kept). The
296 296 # default prompt answer is to use the largefile version since it was
297 297 # presumably changed on purpose.
298 298 #
299 299 # Finally, the merge.applyupdates function will then take care of
300 300 # writing the files into the working copy and lfcommands.updatelfiles
301 301 # will update the largefiles.
302 302 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
303 303 actions = origfn(repo, p1, p2, pa, overwrite, partial)
304 304 processed = []
305 305
306 306 for action in actions:
307 307 if overwrite:
308 308 processed.append(action)
309 309 continue
310 310 f, m = action[:2]
311 311
312 312 choices = (_('&Largefile'), _('&Normal file'))
313 313 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
314 314 # Case 1: normal file in the working copy, largefile in
315 315 # the second parent
316 316 lfile = lfutil.splitstandin(f)
317 317 standin = f
318 318 msg = _('%s has been turned into a largefile\n'
319 319 'use (l)argefile or keep as (n)ormal file?') % lfile
320 320 if repo.ui.promptchoice(msg, choices, 0) == 0:
321 321 processed.append((lfile, "r"))
322 322 processed.append((standin, "g", p2.flags(standin)))
323 323 else:
324 324 processed.append((standin, "r"))
325 325 elif m == "m" and lfutil.standin(f) in p1 and f in p2:
326 326 # Case 2: largefile in the working copy, normal file in
327 327 # the second parent
328 328 standin = lfutil.standin(f)
329 329 lfile = f
330 330 msg = _('%s has been turned into a normal file\n'
331 331 'keep as (l)argefile or use (n)ormal file?') % lfile
332 332 if repo.ui.promptchoice(msg, choices, 0) == 0:
333 333 processed.append((lfile, "r"))
334 334 else:
335 335 processed.append((standin, "r"))
336 336 processed.append((lfile, "g", p2.flags(lfile)))
337 337 else:
338 338 processed.append(action)
339 339
340 340 return processed
341 341
342 342 # Override filemerge to prompt the user about how they wish to merge
343 343 # largefiles. This will handle identical edits, and copy/rename +
344 344 # edit without prompting the user.
345 345 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
346 346 # Use better variable names here. Because this is a wrapper we cannot
347 347 # change the variable names in the function declaration.
348 348 fcdest, fcother, fcancestor = fcd, fco, fca
349 349 if not lfutil.isstandin(orig):
350 350 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
351 351 else:
352 352 if not fcother.cmp(fcdest): # files identical?
353 353 return None
354 354
355 355 # backwards, use working dir parent as ancestor
356 356 if fcancestor == fcother:
357 357 fcancestor = fcdest.parents()[0]
358 358
359 359 if orig != fcother.path():
360 360 repo.ui.status(_('merging %s and %s to %s\n')
361 361 % (lfutil.splitstandin(orig),
362 362 lfutil.splitstandin(fcother.path()),
363 363 lfutil.splitstandin(fcdest.path())))
364 364 else:
365 365 repo.ui.status(_('merging %s\n')
366 366 % lfutil.splitstandin(fcdest.path()))
367 367
368 368 if fcancestor.path() != fcother.path() and fcother.data() == \
369 369 fcancestor.data():
370 370 return 0
371 371 if fcancestor.path() != fcdest.path() and fcdest.data() == \
372 372 fcancestor.data():
373 373 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
374 374 return 0
375 375
376 376 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
377 377 'keep (l)ocal or take (o)ther?') %
378 378 lfutil.splitstandin(orig),
379 379 (_('&Local'), _('&Other')), 0) == 0:
380 380 return 0
381 381 else:
382 382 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
383 383 return 0
384 384
385 385 # Copy first changes the matchers to match standins instead of
386 386 # largefiles. Then it overrides util.copyfile; in that function it
387 387 # checks if the destination largefile already exists. It also keeps a
388 388 # list of copied files so that the largefiles can be copied and the
389 389 # dirstate updated.
390 390 def override_copy(orig, ui, repo, pats, opts, rename=False):
391 391 # doesn't remove largefile on rename
392 392 if len(pats) < 2:
393 393 # this isn't legal, let the original function deal with it
394 394 return orig(ui, repo, pats, opts, rename)
395 395
396 396 def makestandin(relpath):
397 397 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
398 398 return os.path.join(repo.wjoin(lfutil.standin(path)))
399 399
400 400 fullpats = scmutil.expandpats(pats)
401 401 dest = fullpats[-1]
402 402
403 403 if os.path.isdir(dest):
404 404 if not os.path.isdir(makestandin(dest)):
405 405 os.makedirs(makestandin(dest))
406 406 # This could copy both lfiles and normal files in one command,
407 407 # but we don't want to do that. First replace their matcher to
408 408 # only match normal files and run it, then replace it to just
409 409 # match largefiles and run it again.
410 410 nonormalfiles = False
411 411 nolfiles = False
412 412 try:
413 413 try:
414 414 installnormalfilesmatchfn(repo[None].manifest())
415 415 result = orig(ui, repo, pats, opts, rename)
416 416 except util.Abort, e:
417 417 if str(e) != 'no files to copy':
418 418 raise e
419 419 else:
420 420 nonormalfiles = True
421 421 result = 0
422 422 finally:
423 423 restorematchfn()
424 424
425 425 # The first rename can cause our current working directory to be removed.
426 426 # In that case there is nothing left to copy/rename so just quit.
427 427 try:
428 428 repo.getcwd()
429 429 except OSError:
430 430 return result
431 431
432 432 try:
433 433 try:
434 434 # When we call orig below it creates the standins but we don't add them
435 435 # to the dir state until later so lock during that time.
436 436 wlock = repo.wlock()
437 437
438 438 manifest = repo[None].manifest()
439 439 oldmatch = None # for the closure
440 440 def override_match(ctx, pats=[], opts={}, globbed=False,
441 441 default='relpath'):
442 442 newpats = []
443 443 # The patterns were previously mangled to add the standin
444 444 # directory; we need to remove that now
445 445 for pat in pats:
446 446 if match_.patkind(pat) is None and lfutil.shortname in pat:
447 447 newpats.append(pat.replace(lfutil.shortname, ''))
448 448 else:
449 449 newpats.append(pat)
450 450 match = oldmatch(ctx, newpats, opts, globbed, default)
451 451 m = copy.copy(match)
452 452 lfile = lambda f: lfutil.standin(f) in manifest
453 453 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
454 454 m._fmap = set(m._files)
455 455 orig_matchfn = m.matchfn
456 456 m.matchfn = lambda f: (lfutil.isstandin(f) and
457 457 lfile(lfutil.splitstandin(f)) and
458 458 orig_matchfn(lfutil.splitstandin(f)) or
459 459 None)
460 460 return m
461 461 oldmatch = installmatchfn(override_match)
462 462 listpats = []
463 463 for pat in pats:
464 464 if match_.patkind(pat) is not None:
465 465 listpats.append(pat)
466 466 else:
467 467 listpats.append(makestandin(pat))
468 468
469 469 try:
470 470 origcopyfile = util.copyfile
471 471 copiedfiles = []
472 472 def override_copyfile(src, dest):
473 473 if (lfutil.shortname in src and
474 474 dest.startswith(repo.wjoin(lfutil.shortname))):
475 475 destlfile = dest.replace(lfutil.shortname, '')
476 476 if not opts['force'] and os.path.exists(destlfile):
477 477 raise IOError('',
478 478 _('destination largefile already exists'))
479 479 copiedfiles.append((src, dest))
480 480 origcopyfile(src, dest)
481 481
482 482 util.copyfile = override_copyfile
483 483 result += orig(ui, repo, listpats, opts, rename)
484 484 finally:
485 485 util.copyfile = origcopyfile
486 486
487 487 lfdirstate = lfutil.openlfdirstate(ui, repo)
488 488 for (src, dest) in copiedfiles:
489 489 if (lfutil.shortname in src and
490 490 dest.startswith(repo.wjoin(lfutil.shortname))):
491 491 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
492 492 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
493 493 destlfiledir = os.path.dirname(destlfile) or '.'
494 494 if not os.path.isdir(destlfiledir):
495 495 os.makedirs(destlfiledir)
496 496 if rename:
497 497 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
498 498 lfdirstate.remove(srclfile)
499 499 else:
500 500 util.copyfile(srclfile, destlfile)
501 501 lfdirstate.add(destlfile)
502 502 lfdirstate.write()
503 503 except util.Abort, e:
504 504 if str(e) != 'no files to copy':
505 505 raise e
506 506 else:
507 507 nolfiles = True
508 508 finally:
509 509 restorematchfn()
510 510 wlock.release()
511 511
512 512 if nolfiles and nonormalfiles:
513 513 raise util.Abort(_('no files to copy'))
514 514
515 515 return result
516 516
517 517 # When the user calls revert, we have to be careful to not revert any
518 518 # changes to other largefiles accidentally. This means we have to keep
519 519 # track of the largefiles that are being reverted so we only pull down
520 520 # the necessary largefiles.
521 521 #
522 522 # Standins are only updated (to match the hash of largefiles) before
523 523 # commits. Update the standins then run the original revert, changing
524 524 # the matcher to hit standins instead of largefiles. Based on the
525 525 # resulting standins update the largefiles. Then return the standins
526 526 # to their proper state
527 527 def override_revert(orig, ui, repo, *pats, **opts):
528 528 # Because we put the standins in a bad state (by updating them)
529 529 # and then return them to a correct state we need to lock to
530 530 # prevent others from changing them in their incorrect state.
531 531 wlock = repo.wlock()
532 532 try:
533 533 lfdirstate = lfutil.openlfdirstate(ui, repo)
534 534 (modified, added, removed, missing, unknown, ignored, clean) = \
535 535 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
536 536 for lfile in modified:
537 537 lfutil.updatestandin(repo, lfutil.standin(lfile))
538 538 for lfile in missing:
539 539 os.unlink(repo.wjoin(lfutil.standin(lfile)))
540 540
541 541 try:
542 542 ctx = repo[opts.get('rev')]
543 543 oldmatch = None # for the closure
544 544 def override_match(ctx, pats=[], opts={}, globbed=False,
545 545 default='relpath'):
546 546 match = oldmatch(ctx, pats, opts, globbed, default)
547 547 m = copy.copy(match)
548 548 def tostandin(f):
549 549 if lfutil.standin(f) in ctx or lfutil.standin(f) in ctx:
550 550 return lfutil.standin(f)
551 551 elif lfutil.standin(f) in repo[None]:
552 552 return None
553 553 return f
554 554 m._files = [tostandin(f) for f in m._files]
555 555 m._files = [f for f in m._files if f is not None]
556 556 m._fmap = set(m._files)
557 557 orig_matchfn = m.matchfn
558 558 def matchfn(f):
559 559 if lfutil.isstandin(f):
560 560 # We need to keep track of what largefiles are being
561 561 # matched so we know which ones to update later --
562 562 # otherwise we accidentally revert changes to other
563 563 # largefiles. This is repo-specific, so duckpunch the
564 564 # repo object to keep the list of largefiles for us
565 565 # later.
566 566 if orig_matchfn(lfutil.splitstandin(f)) and \
567 567 (f in repo[None] or f in ctx):
568 568 lfileslist = getattr(repo, '_lfilestoupdate', [])
569 569 lfileslist.append(lfutil.splitstandin(f))
570 570 repo._lfilestoupdate = lfileslist
571 571 return True
572 572 else:
573 573 return False
574 574 return orig_matchfn(f)
575 575 m.matchfn = matchfn
576 576 return m
577 577 oldmatch = installmatchfn(override_match)
578 578 scmutil.match
579 579 matches = override_match(repo[None], pats, opts)
580 580 orig(ui, repo, *pats, **opts)
581 581 finally:
582 582 restorematchfn()
583 583 lfileslist = getattr(repo, '_lfilestoupdate', [])
584 584 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
585 585 printmessage=False)
586 586
587 587 # empty out the largefiles list so we start fresh next time
588 588 repo._lfilestoupdate = []
589 589 for lfile in modified:
590 590 if lfile in lfileslist:
591 591 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
592 592 in repo['.']:
593 593 lfutil.writestandin(repo, lfutil.standin(lfile),
594 594 repo['.'][lfile].data().strip(),
595 595 'x' in repo['.'][lfile].flags())
596 596 lfdirstate = lfutil.openlfdirstate(ui, repo)
597 597 for lfile in added:
598 598 standin = lfutil.standin(lfile)
599 599 if standin not in ctx and (standin in matches or opts.get('all')):
600 600 if lfile in lfdirstate:
601 601 lfdirstate.drop(lfile)
602 602 util.unlinkpath(repo.wjoin(standin))
603 603 lfdirstate.write()
604 604 finally:
605 605 wlock.release()
606 606
607 607 def hg_update(orig, repo, node):
608 608 result = orig(repo, node)
609 609 lfcommands.updatelfiles(repo.ui, repo)
610 610 return result
611 611
612 612 def hg_clean(orig, repo, node, show_stats=True):
613 613 result = orig(repo, node, show_stats)
614 614 lfcommands.updatelfiles(repo.ui, repo)
615 615 return result
616 616
617 617 def hg_merge(orig, repo, node, force=None, remind=True):
618 618 # Mark the repo as being in the middle of a merge, so that
619 619 # updatelfiles() will know that it needs to trust the standins in
620 620 # the working copy, not in the standins in the current node
621 621 repo._ismerging = True
622 622 try:
623 623 result = orig(repo, node, force, remind)
624 624 lfcommands.updatelfiles(repo.ui, repo)
625 625 finally:
626 626 repo._ismerging = False
627 627 return result
628 628
629 629 # When we rebase a repository with remotely changed largefiles, we need to
630 630 # take some extra care so that the largefiles are correctly updated in the
631 631 # working copy
632 632 def override_pull(orig, ui, repo, source=None, **opts):
633 633 if opts.get('rebase', False):
634 634 repo._isrebasing = True
635 635 try:
636 636 if opts.get('update'):
637 637 del opts['update']
638 638 ui.debug('--update and --rebase are not compatible, ignoring '
639 639 'the update flag\n')
640 640 del opts['rebase']
641 641 cmdutil.bailifchanged(repo)
642 642 revsprepull = len(repo)
643 643 origpostincoming = commands.postincoming
644 644 def _dummy(*args, **kwargs):
645 645 pass
646 646 commands.postincoming = _dummy
647 647 repo.lfpullsource = source
648 648 if not source:
649 649 source = 'default'
650 650 try:
651 651 result = commands.pull(ui, repo, source, **opts)
652 652 finally:
653 653 commands.postincoming = origpostincoming
654 654 revspostpull = len(repo)
655 655 if revspostpull > revsprepull:
656 656 result = result or rebase.rebase(ui, repo)
657 657 finally:
658 658 repo._isrebasing = False
659 659 else:
660 660 repo.lfpullsource = source
661 661 if not source:
662 662 source = 'default'
663 oldheads = lfutil.getcurrentheads(repo)
663 664 result = orig(ui, repo, source, **opts)
664 665 # If we do not have the new largefiles for any new heads we pulled, we
665 666 # will run into a problem later if we try to merge or rebase with one of
666 667 # these heads, so cache the largefiles now directly into the system
667 668 # cache.
668 669 ui.status(_("caching new largefiles\n"))
669 670 numcached = 0
670 branches = repo.branchmap()
671 for branch in branches:
672 heads = repo.branchheads(branch)
673 for head in heads:
674 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
675 numcached += len(cached)
671 heads = lfutil.getcurrentheads(repo)
672 newheads = set(heads).difference(set(oldheads))
673 for head in newheads:
674 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
675 numcached += len(cached)
676 676 ui.status(_("%d largefiles cached\n") % numcached)
677 677 return result
678 678
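For reference, the non-rebase branch of override_pull above boils down to this ordering: snapshot the heads, run the wrapped pull, then cache largefiles only for the heads the pull introduced. A condensed sketch, assuming the lfutil and lfcommands imports already present at the top of this module (the wrapper name is illustrative and the real function also handles the --rebase path and status output):

    def pullandcachenewheads(orig, ui, repo, source, **opts):
        # Hypothetical condensed wrapper mirroring the non-rebase branch above.
        oldheads = lfutil.getcurrentheads(repo)      # snapshot heads first
        result = orig(ui, repo, source, **opts)      # run the wrapped pull
        newheads = set(lfutil.getcurrentheads(repo)).difference(set(oldheads))
        for head in newheads:                        # cache only what is new
            lfcommands.cachelfiles(ui, repo, head)
        return result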
679 679 def override_rebase(orig, ui, repo, **opts):
680 680 repo._isrebasing = True
681 681 try:
682 682 orig(ui, repo, **opts)
683 683 finally:
684 684 repo._isrebasing = False
685 685
686 686 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
687 687 prefix=None, mtime=None, subrepos=None):
688 688 # No need to lock because we are only reading history and
689 689 # largefile caches, neither of which are modified.
690 690 lfcommands.cachelfiles(repo.ui, repo, node)
691 691
692 692 if kind not in archival.archivers:
693 693 raise util.Abort(_("unknown archive type '%s'") % kind)
694 694
695 695 ctx = repo[node]
696 696
697 697 if kind == 'files':
698 698 if prefix:
699 699 raise util.Abort(
700 700 _('cannot give prefix when archiving to files'))
701 701 else:
702 702 prefix = archival.tidyprefix(dest, kind, prefix)
703 703
704 704 def write(name, mode, islink, getdata):
705 705 if matchfn and not matchfn(name):
706 706 return
707 707 data = getdata()
708 708 if decode:
709 709 data = repo.wwritedata(name, data)
710 710 archiver.addfile(prefix + name, mode, islink, data)
711 711
712 712 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
713 713
714 714 if repo.ui.configbool("ui", "archivemeta", True):
715 715 def metadata():
716 716 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
717 717 hex(repo.changelog.node(0)), hex(node), ctx.branch())
718 718
719 719 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
720 720 if repo.tagtype(t) == 'global')
721 721 if not tags:
722 722 repo.ui.pushbuffer()
723 723 opts = {'template': '{latesttag}\n{latesttagdistance}',
724 724 'style': '', 'patch': None, 'git': None}
725 725 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
726 726 ltags, dist = repo.ui.popbuffer().split('\n')
727 727 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
728 728 tags += 'latesttagdistance: %s\n' % dist
729 729
730 730 return base + tags
731 731
732 732 write('.hg_archival.txt', 0644, False, metadata)
733 733
734 734 for f in ctx:
735 735 ff = ctx.flags(f)
736 736 getdata = ctx[f].data
737 737 if lfutil.isstandin(f):
738 738 path = lfutil.findfile(repo, getdata().strip())
739 739 if path is None:
740 740 raise util.Abort(
741 741 _('largefile %s not found in repo store or system cache')
742 742 % lfutil.splitstandin(f))
743 743 f = lfutil.splitstandin(f)
744 744
745 745 def getdatafn():
746 746 fd = None
747 747 try:
748 748 fd = open(path, 'rb')
749 749 return fd.read()
750 750 finally:
751 751 if fd:
752 752 fd.close()
753 753
754 754 getdata = getdatafn
755 755 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
756 756
757 757 if subrepos:
758 758 for subpath in ctx.substate:
759 759 sub = ctx.sub(subpath)
760 760 sub.archive(repo.ui, archiver, prefix)
761 761
762 762 archiver.done()
763 763
764 764 # If a largefile is modified, the change is not reflected in its
765 765 # standin until a commit. cmdutil.bailifchanged() raises an exception
766 766 # if the repo has uncommitted changes. Wrap it to also check if
767 767 # largefiles were changed. This is used by bisect and backout.
768 768 def override_bailifchanged(orig, repo):
769 769 orig(repo)
770 770 repo.lfstatus = True
771 771 modified, added, removed, deleted = repo.status()[:4]
772 772 repo.lfstatus = False
773 773 if modified or added or removed or deleted:
774 774 raise util.Abort(_('outstanding uncommitted changes'))
775 775
776 776 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
777 777 def override_fetch(orig, ui, repo, *pats, **opts):
778 778 repo.lfstatus = True
779 779 modified, added, removed, deleted = repo.status()[:4]
780 780 repo.lfstatus = False
781 781 if modified or added or removed or deleted:
782 782 raise util.Abort(_('outstanding uncommitted changes'))
783 783 return orig(ui, repo, *pats, **opts)
784 784
785 785 def override_forget(orig, ui, repo, *pats, **opts):
786 786 installnormalfilesmatchfn(repo[None].manifest())
787 787 orig(ui, repo, *pats, **opts)
788 788 restorematchfn()
789 789 m = scmutil.match(repo[None], pats, opts)
790 790
791 791 try:
792 792 repo.lfstatus = True
793 793 s = repo.status(match=m, clean=True)
794 794 finally:
795 795 repo.lfstatus = False
796 796 forget = sorted(s[0] + s[1] + s[3] + s[6])
797 797 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
798 798
799 799 for f in forget:
800 800 if lfutil.standin(f) not in repo.dirstate and not \
801 801 os.path.isdir(m.rel(lfutil.standin(f))):
802 802 ui.warn(_('not removing %s: file is already untracked\n')
803 803 % m.rel(f))
804 804
805 805 for f in forget:
806 806 if ui.verbose or not m.exact(f):
807 807 ui.status(_('removing %s\n') % m.rel(f))
808 808
809 809 # Need to lock because standin files are deleted then removed from the
810 810 # repository and we could race in between.
811 811 wlock = repo.wlock()
812 812 try:
813 813 lfdirstate = lfutil.openlfdirstate(ui, repo)
814 814 for f in forget:
815 815 if lfdirstate[f] == 'a':
816 816 lfdirstate.drop(f)
817 817 else:
818 818 lfdirstate.remove(f)
819 819 lfdirstate.write()
820 820 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
821 821 unlink=True)
822 822 finally:
823 823 wlock.release()
824 824
825 825 def getoutgoinglfiles(ui, repo, dest=None, **opts):
826 826 dest = ui.expandpath(dest or 'default-push', dest or 'default')
827 827 dest, branches = hg.parseurl(dest, opts.get('branch'))
828 828 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
829 829 if revs:
830 830 revs = [repo.lookup(rev) for rev in revs]
831 831
832 832 remoteui = hg.remoteui
833 833
834 834 try:
835 835 remote = hg.repository(remoteui(repo, opts), dest)
836 836 except error.RepoError:
837 837 return None
838 838 o = lfutil.findoutgoing(repo, remote, False)
839 839 if not o:
840 840 return None
841 841 o = repo.changelog.nodesbetween(o, revs)[0]
842 842 if opts.get('newest_first'):
843 843 o.reverse()
844 844
845 845 toupload = set()
846 846 for n in o:
847 847 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
848 848 ctx = repo[n]
849 849 files = set(ctx.files())
850 850 if len(parents) == 2:
851 851 mc = ctx.manifest()
852 852 mp1 = ctx.parents()[0].manifest()
853 853 mp2 = ctx.parents()[1].manifest()
854 854 for f in mp1:
855 855 if f not in mc:
856 856 files.add(f)
857 857 for f in mp2:
858 858 if f not in mc:
859 859 files.add(f)
860 860 for f in mc:
861 861 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
862 862 files.add(f)
863 863 toupload = toupload.union(
864 864 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
865 865 return toupload
866 866
867 867 def override_outgoing(orig, ui, repo, dest=None, **opts):
868 868 orig(ui, repo, dest, **opts)
869 869
870 870 if opts.pop('large', None):
871 871 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
872 872 if toupload is None:
873 873 ui.status(_('largefiles: No remote repo\n'))
874 874 else:
875 875 ui.status(_('largefiles to upload:\n'))
876 876 for file in toupload:
877 877 ui.status(lfutil.splitstandin(file) + '\n')
878 878 ui.status('\n')
879 879
880 880 def override_summary(orig, ui, repo, *pats, **opts):
881 881 try:
882 882 repo.lfstatus = True
883 883 orig(ui, repo, *pats, **opts)
884 884 finally:
885 885 repo.lfstatus = False
886 886
887 887 if opts.pop('large', None):
888 888 toupload = getoutgoinglfiles(ui, repo, None, **opts)
889 889 if toupload is None:
890 890 ui.status(_('largefiles: No remote repo\n'))
891 891 else:
892 892 ui.status(_('largefiles: %d to upload\n') % len(toupload))
893 893
894 894 def override_addremove(orig, ui, repo, *pats, **opts):
895 895 # Get the list of missing largefiles so we can remove them
896 896 lfdirstate = lfutil.openlfdirstate(ui, repo)
897 897 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
898 898 False, False)
899 899 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
900 900
901 901 # Call into the normal remove code, but let the original addremove handle
902 902 # removing the standin. Monkey patching here makes sure
903 903 # we don't remove the standin in the largefiles code, preventing a very
904 904 # confused state later.
905 905 if missing:
906 906 repo._isaddremove = True
907 907 remove_largefiles(ui, repo, *missing, **opts)
908 908 repo._isaddremove = False
909 909 # Call into the normal add code, and any files that *should* be added as
910 910 # largefiles will be
911 911 add_largefiles(ui, repo, *pats, **opts)
912 912 # Now that we've handled largefiles, hand off to the original addremove
913 913 # function to take care of the rest. Make sure it doesn't do anything with
914 914 # largefiles by installing a matcher that will ignore them.
915 915 installnormalfilesmatchfn(repo[None].manifest())
916 916 result = orig(ui, repo, *pats, **opts)
917 917 restorematchfn()
918 918 return result
919 919
920 920 # Calling purge with --all will cause the largefiles to be deleted.
921 921 # Override repo.status to prevent this from happening.
922 922 def override_purge(orig, ui, repo, *dirs, **opts):
923 923 oldstatus = repo.status
924 924 def override_status(node1='.', node2=None, match=None, ignored=False,
925 925 clean=False, unknown=False, listsubrepos=False):
926 926 r = oldstatus(node1, node2, match, ignored, clean, unknown,
927 927 listsubrepos)
928 928 lfdirstate = lfutil.openlfdirstate(ui, repo)
929 929 modified, added, removed, deleted, unknown, ignored, clean = r
930 930 unknown = [f for f in unknown if lfdirstate[f] == '?']
931 931 ignored = [f for f in ignored if lfdirstate[f] == '?']
932 932 return modified, added, removed, deleted, unknown, ignored, clean
933 933 repo.status = override_status
934 934 orig(ui, repo, *dirs, **opts)
935 935 repo.status = oldstatus
936 936
937 937 def override_rollback(orig, ui, repo, **opts):
938 938 result = orig(ui, repo, **opts)
939 939 merge.update(repo, node=None, branchmerge=False, force=True,
940 940 partial=lfutil.isstandin)
941 941 wlock = repo.wlock()
942 942 try:
943 943 lfdirstate = lfutil.openlfdirstate(ui, repo)
944 944 lfiles = lfutil.listlfiles(repo)
945 945 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
946 946 for file in lfiles:
947 947 if file in oldlfiles:
948 948 lfdirstate.normallookup(file)
949 949 else:
950 950 lfdirstate.add(file)
951 951 lfdirstate.write()
952 952 finally:
953 953 wlock.release()
954 954 return result
955 955
956 956 def override_transplant(orig, ui, repo, *revs, **opts):
957 957 try:
958 958 repo._istransplanting = True
959 959 result = orig(ui, repo, *revs, **opts)
960 960 lfcommands.updatelfiles(ui, repo, filelist=None,
961 961 printmessage=False)
962 962 finally:
963 963 repo._istransplanting = False
964 964 return result