largefiles: optimize update speed by only updating changed largefiles...
Na'Tosha Bard
r16120:47ee41fc default
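This changeset avoids re-copying every largefile during hg update: it snapshots the standin state before and after the underlying update (via the new lfutil.getstandinsstate() helper added at the end of the first file below) and then refreshes only the largefiles whose standins actually changed. A minimal, standalone sketch of that comparison follows; changedlargefiles and the file names and hashes in the example are made up for illustration, and only the (largefile, hexhash) tuple shape mirrors what the diff uses:

def changedlargefiles(oldstandins, newstandins):
    # oldstandins and newstandins are lists of (largefile, hexhash) tuples,
    # e.g. the output of lfutil.getstandinsstate() taken before and after
    # an update. Entries present in only one list correspond to largefiles
    # that were added, removed, or whose standin hash changed.
    tobeupdated = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for lfile, _hash in tobeupdated:
        if lfile not in filelist:
            filelist.append(lfile)
    return filelist

# Toy example (hypothetical names and hashes):
#   old = [('big.bin', 'aaaa'), ('data.iso', 'bbbb')]
#   new = [('big.bin', 'aaaa'), ('data.iso', 'cccc')]
#   changedlargefiles(old, new) == ['data.iso']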
@@ -1,459 +1,467
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16 import tempfile
17 17
18 18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 19 from mercurial.i18n import _
20 20
21 21 shortname = '.hglf'
22 22 longname = 'largefiles'
23 23
24 24
25 25 # -- Portability wrappers ----------------------------------------------
26 26
27 27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
28 28 return dirstate.walk(matcher, [], unknown, ignored)
29 29
30 30 def repo_add(repo, list):
31 31 add = repo[None].add
32 32 return add(list)
33 33
34 34 def repo_remove(repo, list, unlink=False):
35 35 def remove(list, unlink):
36 36 wlock = repo.wlock()
37 37 try:
38 38 if unlink:
39 39 for f in list:
40 40 try:
41 41 util.unlinkpath(repo.wjoin(f))
42 42 except OSError, inst:
43 43 if inst.errno != errno.ENOENT:
44 44 raise
45 45 repo[None].forget(list)
46 46 finally:
47 47 wlock.release()
48 48 return remove(list, unlink=unlink)
49 49
50 50 def repo_forget(repo, list):
51 51 forget = repo[None].forget
52 52 return forget(list)
53 53
54 54 def findoutgoing(repo, remote, force):
55 55 from mercurial import discovery
56 56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
57 57 remote, force=force)
58 58 return repo.changelog.findmissing(common)
59 59
60 60 # -- Private worker functions ------------------------------------------
61 61
62 62 def getminsize(ui, assumelfiles, opt, default=10):
63 63 lfsize = opt
64 64 if not lfsize and assumelfiles:
65 65 lfsize = ui.config(longname, 'minsize', default=default)
66 66 if lfsize:
67 67 try:
68 68 lfsize = float(lfsize)
69 69 except ValueError:
70 70 raise util.Abort(_('largefiles: size must be a number (not %s)\n')
71 71 % lfsize)
72 72 if lfsize is None:
73 73 raise util.Abort(_('minimum size for largefiles must be specified'))
74 74 return lfsize
75 75
76 76 def link(src, dest):
77 77 try:
78 78 util.oslink(src, dest)
79 79 except OSError:
80 80 # if hardlinks fail, fallback on atomic copy
81 81 dst = util.atomictempfile(dest)
82 82 for chunk in util.filechunkiter(open(src, 'rb')):
83 83 dst.write(chunk)
84 84 dst.close()
85 85 os.chmod(dest, os.stat(src).st_mode)
86 86
87 87 def usercachepath(ui, hash):
88 88 path = ui.configpath(longname, 'usercache', None)
89 89 if path:
90 90 path = os.path.join(path, hash)
91 91 else:
92 92 if os.name == 'nt':
93 93 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
94 94 if appdata:
95 95 path = os.path.join(appdata, longname, hash)
96 96 elif platform.system() == 'Darwin':
97 97 home = os.getenv('HOME')
98 98 if home:
99 99 path = os.path.join(home, 'Library', 'Caches',
100 100 longname, hash)
101 101 elif os.name == 'posix':
102 102 path = os.getenv('XDG_CACHE_HOME')
103 103 if path:
104 104 path = os.path.join(path, longname, hash)
105 105 else:
106 106 home = os.getenv('HOME')
107 107 if home:
108 108 path = os.path.join(home, '.cache', longname, hash)
109 109 else:
110 110 raise util.Abort(_('unknown operating system: %s\n') % os.name)
111 111 return path
112 112
113 113 def inusercache(ui, hash):
114 114 path = usercachepath(ui, hash)
115 115 return path and os.path.exists(path)
116 116
117 117 def findfile(repo, hash):
118 118 if instore(repo, hash):
119 119 repo.ui.note(_('Found %s in store\n') % hash)
120 120 return storepath(repo, hash)
121 121 elif inusercache(repo.ui, hash):
122 122 repo.ui.note(_('Found %s in system cache\n') % hash)
123 123 path = storepath(repo, hash)
124 124 util.makedirs(os.path.dirname(path))
125 125 link(usercachepath(repo.ui, hash), path)
126 126 return path
127 127 return None
128 128
129 129 class largefiles_dirstate(dirstate.dirstate):
130 130 def __getitem__(self, key):
131 131 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
132 132 def normal(self, f):
133 133 return super(largefiles_dirstate, self).normal(unixpath(f))
134 134 def remove(self, f):
135 135 return super(largefiles_dirstate, self).remove(unixpath(f))
136 136 def add(self, f):
137 137 return super(largefiles_dirstate, self).add(unixpath(f))
138 138 def drop(self, f):
139 139 return super(largefiles_dirstate, self).drop(unixpath(f))
140 140 def forget(self, f):
141 141 return super(largefiles_dirstate, self).forget(unixpath(f))
142 142 def normallookup(self, f):
143 143 return super(largefiles_dirstate, self).normallookup(unixpath(f))
144 144
145 145 def openlfdirstate(ui, repo):
146 146 '''
147 147 Return a dirstate object that tracks largefiles: i.e. its root is
148 148 the repo root, but it is saved in .hg/largefiles/dirstate.
149 149 '''
150 150 admin = repo.join(longname)
151 151 opener = scmutil.opener(admin)
152 152 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
153 153 repo.dirstate._validate)
154 154
155 155 # If the largefiles dirstate does not exist, populate and create
156 156 # it. This ensures that we create it on the first meaningful
157 157 # largefiles operation in a new clone.
158 158 if not os.path.exists(os.path.join(admin, 'dirstate')):
159 159 util.makedirs(admin)
160 160 matcher = getstandinmatcher(repo)
161 161 for standin in dirstate_walk(repo.dirstate, matcher):
162 162 lfile = splitstandin(standin)
163 163 hash = readstandin(repo, lfile)
164 164 lfdirstate.normallookup(lfile)
165 165 try:
166 166 if hash == hashfile(repo.wjoin(lfile)):
167 167 lfdirstate.normal(lfile)
168 168 except OSError, err:
169 169 if err.errno != errno.ENOENT:
170 170 raise
171 171 return lfdirstate
172 172
173 173 def lfdirstate_status(lfdirstate, repo, rev):
174 174 match = match_.always(repo.root, repo.getcwd())
175 175 s = lfdirstate.status(match, [], False, False, False)
176 176 unsure, modified, added, removed, missing, unknown, ignored, clean = s
177 177 for lfile in unsure:
178 178 if repo[rev][standin(lfile)].data().strip() != \
179 179 hashfile(repo.wjoin(lfile)):
180 180 modified.append(lfile)
181 181 else:
182 182 clean.append(lfile)
183 183 lfdirstate.normal(lfile)
184 184 return (modified, added, removed, missing, unknown, ignored, clean)
185 185
186 186 def listlfiles(repo, rev=None, matcher=None):
187 187 '''return a list of largefiles in the working copy or the
188 188 specified changeset'''
189 189
190 190 if matcher is None:
191 191 matcher = getstandinmatcher(repo)
192 192
193 193 # ignore unknown files in working directory
194 194 return [splitstandin(f)
195 195 for f in repo[rev].walk(matcher)
196 196 if rev is not None or repo.dirstate[f] != '?']
197 197
198 198 def instore(repo, hash):
199 199 return os.path.exists(storepath(repo, hash))
200 200
201 201 def storepath(repo, hash):
202 202 return repo.join(os.path.join(longname, hash))
203 203
204 204 def copyfromcache(repo, hash, filename):
205 205 '''Copy the specified largefile from the repo or system cache to
206 206 filename in the repository. Return true on success or false if the
207 207 file was not found in either cache (which should not happen:
208 208 this is meant to be called only after ensuring that the needed
209 209 largefile exists in the cache).'''
210 210 path = findfile(repo, hash)
211 211 if path is None:
212 212 return False
213 213 util.makedirs(os.path.dirname(repo.wjoin(filename)))
214 214 # The write may fail before the file is fully written, but we
215 215 # don't use atomic writes in the working copy.
216 216 shutil.copy(path, repo.wjoin(filename))
217 217 return True
218 218
219 219 def copytostore(repo, rev, file, uploaded=False):
220 220 hash = readstandin(repo, file)
221 221 if instore(repo, hash):
222 222 return
223 223 copytostoreabsolute(repo, repo.wjoin(file), hash)
224 224
225 225 def copyalltostore(repo, node):
226 226 '''Copy all largefiles in a given revision to the store'''
227 227
228 228 ctx = repo[node]
229 229 for filename in ctx.files():
230 230 if isstandin(filename) and filename in ctx.manifest():
231 231 realfile = splitstandin(filename)
232 232 copytostore(repo, ctx.node(), realfile)
233 233
234 234
235 235 def copytostoreabsolute(repo, file, hash):
236 236 util.makedirs(os.path.dirname(storepath(repo, hash)))
237 237 if inusercache(repo.ui, hash):
238 238 link(usercachepath(repo.ui, hash), storepath(repo, hash))
239 239 else:
240 240 dst = util.atomictempfile(storepath(repo, hash))
241 241 for chunk in util.filechunkiter(open(file, 'rb')):
242 242 dst.write(chunk)
243 243 dst.close()
244 244 util.copymode(file, storepath(repo, hash))
245 245 linktousercache(repo, hash)
246 246
247 247 def linktousercache(repo, hash):
248 248 path = usercachepath(repo.ui, hash)
249 249 if path:
250 250 util.makedirs(os.path.dirname(path))
251 251 link(storepath(repo, hash), path)
252 252
253 253 def getstandinmatcher(repo, pats=[], opts={}):
254 254 '''Return a match object that applies pats to the standin directory'''
255 255 standindir = repo.pathto(shortname)
256 256 if pats:
257 257 # patterns supplied: search standin directory relative to current dir
258 258 cwd = repo.getcwd()
259 259 if os.path.isabs(cwd):
260 260 # cwd is an absolute path for hg -R <reponame>
261 261 # work relative to the repository root in this case
262 262 cwd = ''
263 263 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
264 264 elif os.path.isdir(standindir):
265 265 # no patterns: relative to repo root
266 266 pats = [standindir]
267 267 else:
268 268 # no patterns and no standin dir: return matcher that matches nothing
269 269 match = match_.match(repo.root, None, [], exact=True)
270 270 match.matchfn = lambda f: False
271 271 return match
272 272 return getmatcher(repo, pats, opts, showbad=False)
273 273
274 274 def getmatcher(repo, pats=[], opts={}, showbad=True):
275 275 '''Wrapper around scmutil.match() that adds showbad: if false,
276 276 neuter the match object's bad() method so it does not print any
277 277 warnings about missing files or directories.'''
278 278 match = scmutil.match(repo[None], pats, opts)
279 279
280 280 if not showbad:
281 281 match.bad = lambda f, msg: None
282 282 return match
283 283
284 284 def composestandinmatcher(repo, rmatcher):
285 285 '''Return a matcher that accepts standins corresponding to the
286 286 files accepted by rmatcher. Pass the list of files in the matcher
287 287 as the paths specified by the user.'''
288 288 smatcher = getstandinmatcher(repo, rmatcher.files())
289 289 isstandin = smatcher.matchfn
290 290 def composed_matchfn(f):
291 291 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
292 292 smatcher.matchfn = composed_matchfn
293 293
294 294 return smatcher
295 295
296 296 def standin(filename):
297 297 '''Return the repo-relative path to the standin for the specified big
298 298 file.'''
299 299 # Notes:
300 300 # 1) Most callers want an absolute path, but _create_standin() needs
301 301 # it repo-relative so lfadd() can pass it to repo_add(). So leave
302 302 # it up to the caller to use repo.wjoin() to get an absolute path.
303 303 # 2) Join with '/' because that's what dirstate always uses, even on
304 304 # Windows. Change existing separator to '/' first in case we are
305 305 # passed filenames from an external source (like the command line).
306 306 return shortname + '/' + util.pconvert(filename)
307 307
308 308 def isstandin(filename):
309 309 '''Return true if filename is a big file standin. filename must be
310 310 in Mercurial's internal form (slash-separated).'''
311 311 return filename.startswith(shortname + '/')
312 312
313 313 def splitstandin(filename):
314 314 # Split on / because that's what dirstate always uses, even on Windows.
315 315 # Change local separator to / first just in case we are passed filenames
316 316 # from an external source (like the command line).
317 317 bits = util.pconvert(filename).split('/', 1)
318 318 if len(bits) == 2 and bits[0] == shortname:
319 319 return bits[1]
320 320 else:
321 321 return None
322 322
323 323 def updatestandin(repo, standin):
324 324 file = repo.wjoin(splitstandin(standin))
325 325 if os.path.exists(file):
326 326 hash = hashfile(file)
327 327 executable = getexecutable(file)
328 328 writestandin(repo, standin, hash, executable)
329 329
330 330 def readstandin(repo, filename, node=None):
331 331 '''read hex hash from standin for filename at given node, or working
332 332 directory if no node is given'''
333 333 return repo[node][standin(filename)].data().strip()
334 334
335 335 def writestandin(repo, standin, hash, executable):
336 336 '''write hash to <repo.root>/<standin>'''
337 337 writehash(hash, repo.wjoin(standin), executable)
338 338
339 339 def copyandhash(instream, outfile):
340 340 '''Read bytes from instream (iterable) and write them to outfile,
341 341 computing the SHA-1 hash of the data along the way. Close outfile
342 342 when done and return the binary hash.'''
343 343 hasher = util.sha1('')
344 344 for data in instream:
345 345 hasher.update(data)
346 346 outfile.write(data)
347 347
348 348 # Blecch: closing a file that somebody else opened is rude and
349 349 # wrong. But it's so darn convenient and practical! After all,
350 350 # outfile was opened just to copy and hash.
351 351 outfile.close()
352 352
353 353 return hasher.digest()
354 354
355 355 def hashrepofile(repo, file):
356 356 return hashfile(repo.wjoin(file))
357 357
358 358 def hashfile(file):
359 359 if not os.path.exists(file):
360 360 return ''
361 361 hasher = util.sha1('')
362 362 fd = open(file, 'rb')
363 363 for data in blockstream(fd):
364 364 hasher.update(data)
365 365 fd.close()
366 366 return hasher.hexdigest()
367 367
368 368 class limitreader(object):
369 369 def __init__(self, f, limit):
370 370 self.f = f
371 371 self.limit = limit
372 372
373 373 def read(self, length):
374 374 if self.limit == 0:
375 375 return ''
376 376 length = length > self.limit and self.limit or length
377 377 self.limit -= length
378 378 return self.f.read(length)
379 379
380 380 def close(self):
381 381 pass
382 382
383 383 def blockstream(infile, blocksize=128 * 1024):
384 384 """Generator that yields blocks of data from infile and closes infile."""
385 385 while True:
386 386 data = infile.read(blocksize)
387 387 if not data:
388 388 break
389 389 yield data
390 390 # same blecch as copyandhash() above
391 391 infile.close()
392 392
393 393 def writehash(hash, filename, executable):
394 394 util.makedirs(os.path.dirname(filename))
395 395 util.writefile(filename, hash + '\n')
396 396 os.chmod(filename, getmode(executable))
397 397
398 398 def getexecutable(filename):
399 399 mode = os.stat(filename).st_mode
400 400 return ((mode & stat.S_IXUSR) and
401 401 (mode & stat.S_IXGRP) and
402 402 (mode & stat.S_IXOTH))
403 403
404 404 def getmode(executable):
405 405 if executable:
406 406 return 0755
407 407 else:
408 408 return 0644
409 409
410 410 def urljoin(first, second, *arg):
411 411 def join(left, right):
412 412 if not left.endswith('/'):
413 413 left += '/'
414 414 if right.startswith('/'):
415 415 right = right[1:]
416 416 return left + right
417 417
418 418 url = join(first, second)
419 419 for a in arg:
420 420 url = join(url, a)
421 421 return url
422 422
423 423 def hexsha1(data):
424 424 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
425 425 object data"""
426 426 h = util.sha1()
427 427 for chunk in util.filechunkiter(data):
428 428 h.update(chunk)
429 429 return h.hexdigest()
430 430
431 431 def httpsendfile(ui, filename):
432 432 return httpconnection.httpsendfile(ui, filename, 'rb')
433 433
434 434 def unixpath(path):
435 435 '''Return a version of path normalized for use with the lfdirstate.'''
436 436 return util.pconvert(os.path.normpath(path))
437 437
438 438 def islfilesrepo(repo):
439 439 return ('largefiles' in repo.requirements and
440 440 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
441 441
442 442 def mkstemp(repo, prefix):
443 443 '''Returns a file descriptor and a filename corresponding to a temporary
444 444 file in the repo's largefiles store.'''
445 445 path = repo.join(longname)
446 446 util.makedirs(path)
447 447 return tempfile.mkstemp(prefix=prefix, dir=path)
448 448
449 449 class storeprotonotcapable(Exception):
450 450 def __init__(self, storetypes):
451 451 self.storetypes = storetypes
452 452
453 453 def getcurrentheads(repo):
454 454 branches = repo.branchmap()
455 455 heads = []
456 456 for branch in branches:
457 457 newheads = repo.branchheads(branch)
458 458 heads = heads + newheads
459 459 return heads
460
461 def getstandinsstate(repo):
462 standins = []
463 matcher = getstandinmatcher(repo)
464 for standin in dirstate_walk(repo.dirstate, matcher):
465 lfile = splitstandin(standin)
466 standins.append((lfile, readstandin(repo, lfile)))
467 return standins
@@ -1,961 +1,973
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 27 largefiles'''
28 28 oldmatch = None # for the closure
29 29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 30 default='relpath'):
31 31 match = oldmatch(ctx, pats, opts, globbed, default)
32 32 m = copy.copy(match)
33 33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 34 manifest)
35 35 m._files = filter(notlfile, m._files)
36 36 m._fmap = set(m._files)
37 37 orig_matchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(override_match)
41 41
42 42 def installmatchfn(f):
43 43 oldmatch = scmutil.match
44 44 setattr(f, 'oldmatch', oldmatch)
45 45 scmutil.match = f
46 46 return oldmatch
47 47
48 48 def restorematchfn():
49 49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 50 was called. no-op if scmutil.match is its original function.
51 51
52 52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 53 restore matchfn to reverse'''
54 54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55 55
56 56 def add_largefiles(ui, repo, *pats, **opts):
57 57 large = opts.pop('large', None)
58 58 lfsize = lfutil.getminsize(
59 59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60 60
61 61 lfmatcher = None
62 62 if lfutil.islfilesrepo(repo):
63 63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 64 if lfpats:
65 65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66 66
67 67 lfnames = []
68 68 m = scmutil.match(repo[None], pats, opts)
69 69 m.bad = lambda x, y: None
70 70 wctx = repo[None]
71 71 for f in repo.walk(m):
72 72 exact = m.exact(f)
73 73 lfile = lfutil.standin(f) in wctx
74 74 nfile = f in wctx
75 75 exists = lfile or nfile
76 76
77 77 # Don't warn the user when they attempt to add a normal tracked file.
78 78 # The normal add code will do that for us.
79 79 if exact and exists:
80 80 if lfile:
81 81 ui.warn(_('%s already a largefile\n') % f)
82 82 continue
83 83
84 84 if exact or not exists:
85 85 abovemin = (lfsize and
86 86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 88 lfnames.append(f)
89 89 if ui.verbose or not exact:
90 90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91 91
92 92 bad = []
93 93 standins = []
94 94
95 95 # Need to lock, otherwise there could be a race condition between
96 96 # when standins are created and added to the repo.
97 97 wlock = repo.wlock()
98 98 try:
99 99 if not opts.get('dry_run'):
100 100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 101 for f in lfnames:
102 102 standinname = lfutil.standin(f)
103 103 lfutil.writestandin(repo, standinname, hash='',
104 104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 105 standins.append(standinname)
106 106 if lfdirstate[f] == 'r':
107 107 lfdirstate.normallookup(f)
108 108 else:
109 109 lfdirstate.add(f)
110 110 lfdirstate.write()
111 111 bad += [lfutil.splitstandin(f)
112 112 for f in lfutil.repo_add(repo, standins)
113 113 if f in m.files()]
114 114 finally:
115 115 wlock.release()
116 116 return bad
117 117
118 118 def remove_largefiles(ui, repo, *pats, **opts):
119 119 after = opts.get('after')
120 120 if not pats and not after:
121 121 raise util.Abort(_('no files specified'))
122 122 m = scmutil.match(repo[None], pats, opts)
123 123 try:
124 124 repo.lfstatus = True
125 125 s = repo.status(match=m, clean=True)
126 126 finally:
127 127 repo.lfstatus = False
128 128 manifest = repo[None].manifest()
129 129 modified, added, deleted, clean = [[f for f in list
130 130 if lfutil.standin(f) in manifest]
131 131 for list in [s[0], s[1], s[3], s[6]]]
132 132
133 133 def warn(files, reason):
134 134 for f in files:
135 135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 136 % (m.rel(f), reason))
137 137
138 138 if after:
139 139 remove, forget = deleted, []
140 140 warn(modified + added + clean, _('file still exists'))
141 141 else:
142 142 remove, forget = deleted + clean, []
143 143 warn(modified, _('file is modified'))
144 144 warn(added, _('file has been marked for add'))
145 145
146 146 for f in sorted(remove + forget):
147 147 if ui.verbose or not m.exact(f):
148 148 ui.status(_('removing %s\n') % m.rel(f))
149 149
150 150 # Need to lock because standin files are deleted then removed from the
151 151 # repository and we could race in between.
152 152 wlock = repo.wlock()
153 153 try:
154 154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 155 for f in remove:
156 156 if not after:
157 157 # If this is being called by addremove, notify the user that we
158 158 # are removing the file.
159 159 if getattr(repo, "_isaddremove", False):
160 160 ui.status(_('removing %s\n') % f)
161 161 if os.path.exists(repo.wjoin(f)):
162 162 util.unlinkpath(repo.wjoin(f))
163 163 lfdirstate.remove(f)
164 164 lfdirstate.write()
165 165 forget = [lfutil.standin(f) for f in forget]
166 166 remove = [lfutil.standin(f) for f in remove]
167 167 lfutil.repo_forget(repo, forget)
168 168 # If this is being called by addremove, let the original addremove
169 169 # function handle this.
170 170 if not getattr(repo, "_isaddremove", False):
171 171 lfutil.repo_remove(repo, remove, unlink=True)
172 172 finally:
173 173 wlock.release()
174 174
175 175 # -- Wrappers: modify existing commands --------------------------------
176 176
177 177 # Add works by going through the files that the user wanted to add and
178 178 # checking if they should be added as largefiles. Then it makes a new
179 179 # matcher which matches only the normal files and runs the original
180 180 # version of add.
181 181 def override_add(orig, ui, repo, *pats, **opts):
182 182 normal = opts.pop('normal')
183 183 if normal:
184 184 if opts.get('large'):
185 185 raise util.Abort(_('--normal cannot be used with --large'))
186 186 return orig(ui, repo, *pats, **opts)
187 187 bad = add_largefiles(ui, repo, *pats, **opts)
188 188 installnormalfilesmatchfn(repo[None].manifest())
189 189 result = orig(ui, repo, *pats, **opts)
190 190 restorematchfn()
191 191
192 192 return (result == 1 or bad) and 1 or 0
193 193
194 194 def override_remove(orig, ui, repo, *pats, **opts):
195 195 installnormalfilesmatchfn(repo[None].manifest())
196 196 orig(ui, repo, *pats, **opts)
197 197 restorematchfn()
198 198 remove_largefiles(ui, repo, *pats, **opts)
199 199
200 200 def override_status(orig, ui, repo, *pats, **opts):
201 201 try:
202 202 repo.lfstatus = True
203 203 return orig(ui, repo, *pats, **opts)
204 204 finally:
205 205 repo.lfstatus = False
206 206
207 207 def override_log(orig, ui, repo, *pats, **opts):
208 208 try:
209 209 repo.lfstatus = True
210 210 orig(ui, repo, *pats, **opts)
211 211 finally:
212 212 repo.lfstatus = False
213 213
214 214 def override_verify(orig, ui, repo, *pats, **opts):
215 215 large = opts.pop('large', False)
216 216 all = opts.pop('lfa', False)
217 217 contents = opts.pop('lfc', False)
218 218
219 219 result = orig(ui, repo, *pats, **opts)
220 220 if large:
221 221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 222 return result
223 223
224 224 # Override needs to refresh standins so that update's normal merge
225 225 # will go through properly. Then the other update hook (overriding repo.update)
226 226 # will get the new files. Filemerge is also overridden so that the merge
227 227 # will merge standins correctly.
228 228 def override_update(orig, ui, repo, *pats, **opts):
229 229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 231 False, False)
232 232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233 233
234 234 # Need to lock between the standins getting updated and their
235 235 # largefiles getting updated
236 236 wlock = repo.wlock()
237 237 try:
238 238 if opts['check']:
239 239 mod = len(modified) > 0
240 240 for lfile in unsure:
241 241 standin = lfutil.standin(lfile)
242 242 if repo['.'][standin].data().strip() != \
243 243 lfutil.hashfile(repo.wjoin(lfile)):
244 244 mod = True
245 245 else:
246 246 lfdirstate.normal(lfile)
247 247 lfdirstate.write()
248 248 if mod:
249 249 raise util.Abort(_('uncommitted local changes'))
250 250 # XXX handle removed differently
251 251 if not opts['clean']:
252 252 for lfile in unsure + modified + added:
253 253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 254 finally:
255 255 wlock.release()
256 256 return orig(ui, repo, *pats, **opts)
257 257
258 258 # Before starting the manifest merge, merge.updates will call
259 259 # _checkunknown to check if there are any files in the merged-in
260 260 # changeset that collide with unknown files in the working copy.
261 261 #
262 262 # The largefiles are seen as unknown, so this prevents us from merging
263 263 # in a file 'foo' if we already have a largefile with the same name.
264 264 #
265 265 # The overridden function filters the unknown files by removing any
266 266 # largefiles. This makes the merge proceed and we can then handle this
267 267 # case further in the overridden manifestmerge function below.
268 268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
269 269 if lfutil.standin(f) in wctx:
270 270 return False
271 271 return origfn(repo, wctx, mctx, f)
272 272
273 273 # The manifest merge handles conflicts on the manifest level. We want
274 274 # to handle changes in largefile-ness of files at this level too.
275 275 #
276 276 # The strategy is to run the original manifestmerge and then process
277 277 # the action list it outputs. There are two cases we need to deal with:
278 278 #
279 279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 280 # detected via its standin file, which will enter the working copy
281 281 # with a "get" action. It is not "merge" since the standin is all
282 282 # Mercurial is concerned with at this level -- the link to the
283 283 # existing normal file is not relevant here.
284 284 #
285 285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 286 # since the largefile will be present in the working copy and
287 287 # different from the normal file in p2. Mercurial therefore
288 288 # triggers a merge action.
289 289 #
290 290 # In both cases, we prompt the user and emit new actions to either
291 291 # remove the standin (if the normal file was kept) or to remove the
292 292 # normal file and get the standin (if the largefile was kept). The
293 293 # default prompt answer is to use the largefile version since it was
294 294 # presumably changed on purpose.
295 295 #
296 296 # Finally, the merge.applyupdates function will then take care of
297 297 # writing the files into the working copy and lfcommands.updatelfiles
298 298 # will update the largefiles.
299 299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300 300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
301 301 processed = []
302 302
303 303 for action in actions:
304 304 if overwrite:
305 305 processed.append(action)
306 306 continue
307 307 f, m = action[:2]
308 308
309 309 choices = (_('&Largefile'), _('&Normal file'))
310 310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311 311 # Case 1: normal file in the working copy, largefile in
312 312 # the second parent
313 313 lfile = lfutil.splitstandin(f)
314 314 standin = f
315 315 msg = _('%s has been turned into a largefile\n'
316 316 'use (l)argefile or keep as (n)ormal file?') % lfile
317 317 if repo.ui.promptchoice(msg, choices, 0) == 0:
318 318 processed.append((lfile, "r"))
319 319 processed.append((standin, "g", p2.flags(standin)))
320 320 else:
321 321 processed.append((standin, "r"))
322 322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323 323 # Case 2: largefile in the working copy, normal file in
324 324 # the second parent
325 325 standin = lfutil.standin(f)
326 326 lfile = f
327 327 msg = _('%s has been turned into a normal file\n'
328 328 'keep as (l)argefile or use (n)ormal file?') % lfile
329 329 if repo.ui.promptchoice(msg, choices, 0) == 0:
330 330 processed.append((lfile, "r"))
331 331 else:
332 332 processed.append((standin, "r"))
333 333 processed.append((lfile, "g", p2.flags(lfile)))
334 334 else:
335 335 processed.append(action)
336 336
337 337 return processed
338 338
339 339 # Override filemerge to prompt the user about how they wish to merge
340 340 # largefiles. This will handle identical edits, and copy/rename +
341 341 # edit without prompting the user.
342 342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
343 343 # Use better variable names here. Because this is a wrapper we cannot
344 344 # change the variable names in the function declaration.
345 345 fcdest, fcother, fcancestor = fcd, fco, fca
346 346 if not lfutil.isstandin(orig):
347 347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
348 348 else:
349 349 if not fcother.cmp(fcdest): # files identical?
350 350 return None
351 351
352 352 # backwards, use working dir parent as ancestor
353 353 if fcancestor == fcother:
354 354 fcancestor = fcdest.parents()[0]
355 355
356 356 if orig != fcother.path():
357 357 repo.ui.status(_('merging %s and %s to %s\n')
358 358 % (lfutil.splitstandin(orig),
359 359 lfutil.splitstandin(fcother.path()),
360 360 lfutil.splitstandin(fcdest.path())))
361 361 else:
362 362 repo.ui.status(_('merging %s\n')
363 363 % lfutil.splitstandin(fcdest.path()))
364 364
365 365 if fcancestor.path() != fcother.path() and fcother.data() == \
366 366 fcancestor.data():
367 367 return 0
368 368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
369 369 fcancestor.data():
370 370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
371 371 return 0
372 372
373 373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
374 374 'keep (l)ocal or take (o)ther?') %
375 375 lfutil.splitstandin(orig),
376 376 (_('&Local'), _('&Other')), 0) == 0:
377 377 return 0
378 378 else:
379 379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
380 380 return 0
381 381
382 382 # Copy first changes the matchers to match standins instead of
383 383 # largefiles. Then it overrides util.copyfile; in that override it
384 384 # checks if the destination largefile already exists. It also keeps a
385 385 # list of copied files so that the largefiles can be copied and the
386 386 # dirstate updated.
387 387 def override_copy(orig, ui, repo, pats, opts, rename=False):
388 388 # doesn't remove largefile on rename
389 389 if len(pats) < 2:
390 390 # this isn't legal, let the original function deal with it
391 391 return orig(ui, repo, pats, opts, rename)
392 392
393 393 def makestandin(relpath):
394 394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
395 395 return os.path.join(repo.wjoin(lfutil.standin(path)))
396 396
397 397 fullpats = scmutil.expandpats(pats)
398 398 dest = fullpats[-1]
399 399
400 400 if os.path.isdir(dest):
401 401 if not os.path.isdir(makestandin(dest)):
402 402 os.makedirs(makestandin(dest))
403 403 # This could copy both lfiles and normal files in one command,
404 404 # but we don't want to do that. First replace their matcher to
405 405 # only match normal files and run it, then replace it to just
406 406 # match largefiles and run it again.
407 407 nonormalfiles = False
408 408 nolfiles = False
409 409 try:
410 410 try:
411 411 installnormalfilesmatchfn(repo[None].manifest())
412 412 result = orig(ui, repo, pats, opts, rename)
413 413 except util.Abort, e:
414 414 if str(e) != 'no files to copy':
415 415 raise e
416 416 else:
417 417 nonormalfiles = True
418 418 result = 0
419 419 finally:
420 420 restorematchfn()
421 421
422 422 # The first rename can cause our current working directory to be removed.
423 423 # In that case there is nothing left to copy/rename so just quit.
424 424 try:
425 425 repo.getcwd()
426 426 except OSError:
427 427 return result
428 428
429 429 try:
430 430 try:
431 431 # When we call orig below it creates the standins but we don't add them
432 432 # to the dirstate until later, so lock during that time.
433 433 wlock = repo.wlock()
434 434
435 435 manifest = repo[None].manifest()
436 436 oldmatch = None # for the closure
437 437 def override_match(ctx, pats=[], opts={}, globbed=False,
438 438 default='relpath'):
439 439 newpats = []
440 440 # The patterns were previously mangled to add the standin
441 441 # directory; we need to remove that now
442 442 for pat in pats:
443 443 if match_.patkind(pat) is None and lfutil.shortname in pat:
444 444 newpats.append(pat.replace(lfutil.shortname, ''))
445 445 else:
446 446 newpats.append(pat)
447 447 match = oldmatch(ctx, newpats, opts, globbed, default)
448 448 m = copy.copy(match)
449 449 lfile = lambda f: lfutil.standin(f) in manifest
450 450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
451 451 m._fmap = set(m._files)
452 452 orig_matchfn = m.matchfn
453 453 m.matchfn = lambda f: (lfutil.isstandin(f) and
454 454 (f in manifest) and
455 455 orig_matchfn(lfutil.splitstandin(f)) or
456 456 None)
457 457 return m
458 458 oldmatch = installmatchfn(override_match)
459 459 listpats = []
460 460 for pat in pats:
461 461 if match_.patkind(pat) is not None:
462 462 listpats.append(pat)
463 463 else:
464 464 listpats.append(makestandin(pat))
465 465
466 466 try:
467 467 origcopyfile = util.copyfile
468 468 copiedfiles = []
469 469 def override_copyfile(src, dest):
470 470 if (lfutil.shortname in src and
471 471 dest.startswith(repo.wjoin(lfutil.shortname))):
472 472 destlfile = dest.replace(lfutil.shortname, '')
473 473 if not opts['force'] and os.path.exists(destlfile):
474 474 raise IOError('',
475 475 _('destination largefile already exists'))
476 476 copiedfiles.append((src, dest))
477 477 origcopyfile(src, dest)
478 478
479 479 util.copyfile = override_copyfile
480 480 result += orig(ui, repo, listpats, opts, rename)
481 481 finally:
482 482 util.copyfile = origcopyfile
483 483
484 484 lfdirstate = lfutil.openlfdirstate(ui, repo)
485 485 for (src, dest) in copiedfiles:
486 486 if (lfutil.shortname in src and
487 487 dest.startswith(repo.wjoin(lfutil.shortname))):
488 488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
489 489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
490 490 destlfiledir = os.path.dirname(destlfile) or '.'
491 491 if not os.path.isdir(destlfiledir):
492 492 os.makedirs(destlfiledir)
493 493 if rename:
494 494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
495 495 lfdirstate.remove(srclfile)
496 496 else:
497 497 util.copyfile(srclfile, destlfile)
498 498 lfdirstate.add(destlfile)
499 499 lfdirstate.write()
500 500 except util.Abort, e:
501 501 if str(e) != 'no files to copy':
502 502 raise e
503 503 else:
504 504 nolfiles = True
505 505 finally:
506 506 restorematchfn()
507 507 wlock.release()
508 508
509 509 if nolfiles and nonormalfiles:
510 510 raise util.Abort(_('no files to copy'))
511 511
512 512 return result
513 513
514 514 # When the user calls revert, we have to be careful to not revert any
515 515 # changes to other largefiles accidentally. This means we have to keep
516 516 # track of the largefiles that are being reverted so we only pull down
517 517 # the necessary largefiles.
518 518 #
519 519 # Standins are only updated (to match the hash of largefiles) before
520 520 # commits. Update the standins then run the original revert, changing
521 521 # the matcher to hit standins instead of largefiles. Based on the
522 522 # resulting standins, update the largefiles. Then return the standins
523 523 # to their proper state.
524 524 def override_revert(orig, ui, repo, *pats, **opts):
525 525 # Because we put the standins in a bad state (by updating them)
526 526 # and then return them to a correct state we need to lock to
527 527 # prevent others from changing them in their incorrect state.
528 528 wlock = repo.wlock()
529 529 try:
530 530 lfdirstate = lfutil.openlfdirstate(ui, repo)
531 531 (modified, added, removed, missing, unknown, ignored, clean) = \
532 532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
533 533 for lfile in modified:
534 534 lfutil.updatestandin(repo, lfutil.standin(lfile))
535 535 for lfile in missing:
536 536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
537 537
538 538 try:
539 539 ctx = repo[opts.get('rev')]
540 540 oldmatch = None # for the closure
541 541 def override_match(ctx, pats=[], opts={}, globbed=False,
542 542 default='relpath'):
543 543 match = oldmatch(ctx, pats, opts, globbed, default)
544 544 m = copy.copy(match)
545 545 def tostandin(f):
546 546 if lfutil.standin(f) in ctx:
547 547 return lfutil.standin(f)
548 548 elif lfutil.standin(f) in repo[None]:
549 549 return None
550 550 return f
551 551 m._files = [tostandin(f) for f in m._files]
552 552 m._files = [f for f in m._files if f is not None]
553 553 m._fmap = set(m._files)
554 554 orig_matchfn = m.matchfn
555 555 def matchfn(f):
556 556 if lfutil.isstandin(f):
557 557 # We need to keep track of what largefiles are being
558 558 # matched so we know which ones to update later --
559 559 # otherwise we accidentally revert changes to other
560 560 # largefiles. This is repo-specific, so duckpunch the
561 561 # repo object to keep the list of largefiles for us
562 562 # later.
563 563 if orig_matchfn(lfutil.splitstandin(f)) and \
564 564 (f in repo[None] or f in ctx):
565 565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 566 lfileslist.append(lfutil.splitstandin(f))
567 567 repo._lfilestoupdate = lfileslist
568 568 return True
569 569 else:
570 570 return False
571 571 return orig_matchfn(f)
572 572 m.matchfn = matchfn
573 573 return m
574 574 oldmatch = installmatchfn(override_match)
575 575 scmutil.match
576 576 matches = override_match(repo[None], pats, opts)
577 577 orig(ui, repo, *pats, **opts)
578 578 finally:
579 579 restorematchfn()
580 580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 582 printmessage=False)
583 583
584 584 # empty out the largefiles list so we start fresh next time
585 585 repo._lfilestoupdate = []
586 586 for lfile in modified:
587 587 if lfile in lfileslist:
588 588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 589 in repo['.']:
590 590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 591 repo['.'][lfile].data().strip(),
592 592 'x' in repo['.'][lfile].flags())
593 593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 594 for lfile in added:
595 595 standin = lfutil.standin(lfile)
596 596 if standin not in ctx and (standin in matches or opts.get('all')):
597 597 if lfile in lfdirstate:
598 598 lfdirstate.drop(lfile)
599 599 util.unlinkpath(repo.wjoin(standin))
600 600 lfdirstate.write()
601 601 finally:
602 602 wlock.release()
603 603
604 604 def hg_update(orig, repo, node):
605 # To avoid wasting a lot of extra time during the update-largefiles
606 # step, we record the state of the standins before and after we call
607 # the original update function, and only update the largefiles whose
608 # standins changed in the hg.update() call.
609 oldstandins = lfutil.getstandinsstate(repo)
605 610 result = orig(repo, node)
606 lfcommands.updatelfiles(repo.ui, repo)
611 newstandins = lfutil.getstandinsstate(repo)
612 tobeupdated = set(oldstandins).symmetric_difference(set(newstandins))
613 filelist = []
614 for f in tobeupdated:
615 if f[0] not in filelist:
616 filelist.append(f[0])
617
618 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
607 619 return result
608 620
609 621 def hg_clean(orig, repo, node, show_stats=True):
610 622 result = orig(repo, node, show_stats)
611 623 lfcommands.updatelfiles(repo.ui, repo)
612 624 return result
613 625
614 626 def hg_merge(orig, repo, node, force=None, remind=True):
615 627 # Mark the repo as being in the middle of a merge, so that
616 628 # updatelfiles() will know that it needs to trust the standins in
617 629 # the working copy, not the standins in the current node
618 630 repo._ismerging = True
619 631 try:
620 632 result = orig(repo, node, force, remind)
621 633 lfcommands.updatelfiles(repo.ui, repo)
622 634 finally:
623 635 repo._ismerging = False
624 636 return result
625 637
626 638 # When we rebase a repository with remotely changed largefiles, we need to
627 639 # take some extra care so that the largefiles are correctly updated in the
628 640 # working copy
629 641 def override_pull(orig, ui, repo, source=None, **opts):
630 642 if opts.get('rebase', False):
631 643 repo._isrebasing = True
632 644 try:
633 645 if opts.get('update'):
634 646 del opts['update']
635 647 ui.debug('--update and --rebase are not compatible, ignoring '
636 648 'the update flag\n')
637 649 del opts['rebase']
638 650 cmdutil.bailifchanged(repo)
639 651 revsprepull = len(repo)
640 652 origpostincoming = commands.postincoming
641 653 def _dummy(*args, **kwargs):
642 654 pass
643 655 commands.postincoming = _dummy
644 656 repo.lfpullsource = source
645 657 if not source:
646 658 source = 'default'
647 659 try:
648 660 result = commands.pull(ui, repo, source, **opts)
649 661 finally:
650 662 commands.postincoming = origpostincoming
651 663 revspostpull = len(repo)
652 664 if revspostpull > revsprepull:
653 665 result = result or rebase.rebase(ui, repo)
654 666 finally:
655 667 repo._isrebasing = False
656 668 else:
657 669 repo.lfpullsource = source
658 670 if not source:
659 671 source = 'default'
660 672 oldheads = lfutil.getcurrentheads(repo)
661 673 result = orig(ui, repo, source, **opts)
662 674 # If we do not have the new largefiles for any new heads we pulled, we
663 675 # will run into a problem later if we try to merge or rebase with one of
664 676 # these heads, so cache the largefiles now directly into the system
665 677 # cache.
666 678 ui.status(_("caching new largefiles\n"))
667 679 numcached = 0
668 680 heads = lfutil.getcurrentheads(repo)
669 681 newheads = set(heads).difference(set(oldheads))
670 682 for head in newheads:
671 683 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
672 684 numcached += len(cached)
673 685 ui.status(_("%d largefiles cached\n") % numcached)
674 686 return result
675 687
676 688 def override_rebase(orig, ui, repo, **opts):
677 689 repo._isrebasing = True
678 690 try:
679 691 orig(ui, repo, **opts)
680 692 finally:
681 693 repo._isrebasing = False
682 694
683 695 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
684 696 prefix=None, mtime=None, subrepos=None):
685 697 # No need to lock because we are only reading history and
686 698 # largefile caches, neither of which are modified.
687 699 lfcommands.cachelfiles(repo.ui, repo, node)
688 700
689 701 if kind not in archival.archivers:
690 702 raise util.Abort(_("unknown archive type '%s'") % kind)
691 703
692 704 ctx = repo[node]
693 705
694 706 if kind == 'files':
695 707 if prefix:
696 708 raise util.Abort(
697 709 _('cannot give prefix when archiving to files'))
698 710 else:
699 711 prefix = archival.tidyprefix(dest, kind, prefix)
700 712
701 713 def write(name, mode, islink, getdata):
702 714 if matchfn and not matchfn(name):
703 715 return
704 716 data = getdata()
705 717 if decode:
706 718 data = repo.wwritedata(name, data)
707 719 archiver.addfile(prefix + name, mode, islink, data)
708 720
709 721 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
710 722
711 723 if repo.ui.configbool("ui", "archivemeta", True):
712 724 def metadata():
713 725 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
714 726 hex(repo.changelog.node(0)), hex(node), ctx.branch())
715 727
716 728 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
717 729 if repo.tagtype(t) == 'global')
718 730 if not tags:
719 731 repo.ui.pushbuffer()
720 732 opts = {'template': '{latesttag}\n{latesttagdistance}',
721 733 'style': '', 'patch': None, 'git': None}
722 734 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
723 735 ltags, dist = repo.ui.popbuffer().split('\n')
724 736 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
725 737 tags += 'latesttagdistance: %s\n' % dist
726 738
727 739 return base + tags
728 740
729 741 write('.hg_archival.txt', 0644, False, metadata)
730 742
731 743 for f in ctx:
732 744 ff = ctx.flags(f)
733 745 getdata = ctx[f].data
734 746 if lfutil.isstandin(f):
735 747 path = lfutil.findfile(repo, getdata().strip())
736 748 if path is None:
737 749 raise util.Abort(
738 750 _('largefile %s not found in repo store or system cache')
739 751 % lfutil.splitstandin(f))
740 752 f = lfutil.splitstandin(f)
741 753
742 754 def getdatafn():
743 755 fd = None
744 756 try:
745 757 fd = open(path, 'rb')
746 758 return fd.read()
747 759 finally:
748 760 if fd:
749 761 fd.close()
750 762
751 763 getdata = getdatafn
752 764 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
753 765
754 766 if subrepos:
755 767 for subpath in ctx.substate:
756 768 sub = ctx.sub(subpath)
757 769 sub.archive(repo.ui, archiver, prefix)
758 770
759 771 archiver.done()
760 772
761 773 # If a largefile is modified, the change is not reflected in its
762 774 # standin until a commit. cmdutil.bailifchanged() raises an exception
763 775 # if the repo has uncommitted changes. Wrap it to also check if
764 776 # largefiles were changed. This is used by bisect and backout.
765 777 def override_bailifchanged(orig, repo):
766 778 orig(repo)
767 779 repo.lfstatus = True
768 780 modified, added, removed, deleted = repo.status()[:4]
769 781 repo.lfstatus = False
770 782 if modified or added or removed or deleted:
771 783 raise util.Abort(_('outstanding uncommitted changes'))
772 784
773 785 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
774 786 def override_fetch(orig, ui, repo, *pats, **opts):
775 787 repo.lfstatus = True
776 788 modified, added, removed, deleted = repo.status()[:4]
777 789 repo.lfstatus = False
778 790 if modified or added or removed or deleted:
779 791 raise util.Abort(_('outstanding uncommitted changes'))
780 792 return orig(ui, repo, *pats, **opts)
781 793
782 794 def override_forget(orig, ui, repo, *pats, **opts):
783 795 installnormalfilesmatchfn(repo[None].manifest())
784 796 orig(ui, repo, *pats, **opts)
785 797 restorematchfn()
786 798 m = scmutil.match(repo[None], pats, opts)
787 799
788 800 try:
789 801 repo.lfstatus = True
790 802 s = repo.status(match=m, clean=True)
791 803 finally:
792 804 repo.lfstatus = False
793 805 forget = sorted(s[0] + s[1] + s[3] + s[6])
794 806 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
795 807
796 808 for f in forget:
797 809 if lfutil.standin(f) not in repo.dirstate and not \
798 810 os.path.isdir(m.rel(lfutil.standin(f))):
799 811 ui.warn(_('not removing %s: file is already untracked\n')
800 812 % m.rel(f))
801 813
802 814 for f in forget:
803 815 if ui.verbose or not m.exact(f):
804 816 ui.status(_('removing %s\n') % m.rel(f))
805 817
806 818 # Need to lock because standin files are deleted then removed from the
807 819 # repository and we could race in between.
808 820 wlock = repo.wlock()
809 821 try:
810 822 lfdirstate = lfutil.openlfdirstate(ui, repo)
811 823 for f in forget:
812 824 if lfdirstate[f] == 'a':
813 825 lfdirstate.drop(f)
814 826 else:
815 827 lfdirstate.remove(f)
816 828 lfdirstate.write()
817 829 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
818 830 unlink=True)
819 831 finally:
820 832 wlock.release()
821 833
822 834 def getoutgoinglfiles(ui, repo, dest=None, **opts):
823 835 dest = ui.expandpath(dest or 'default-push', dest or 'default')
824 836 dest, branches = hg.parseurl(dest, opts.get('branch'))
825 837 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
826 838 if revs:
827 839 revs = [repo.lookup(rev) for rev in revs]
828 840
829 841 remoteui = hg.remoteui
830 842
831 843 try:
832 844 remote = hg.repository(remoteui(repo, opts), dest)
833 845 except error.RepoError:
834 846 return None
835 847 o = lfutil.findoutgoing(repo, remote, False)
836 848 if not o:
837 849 return None
838 850 o = repo.changelog.nodesbetween(o, revs)[0]
839 851 if opts.get('newest_first'):
840 852 o.reverse()
841 853
842 854 toupload = set()
843 855 for n in o:
844 856 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
845 857 ctx = repo[n]
846 858 files = set(ctx.files())
847 859 if len(parents) == 2:
848 860 mc = ctx.manifest()
849 861 mp1 = ctx.parents()[0].manifest()
850 862 mp2 = ctx.parents()[1].manifest()
851 863 for f in mp1:
852 864 if f not in mc:
853 865 files.add(f)
854 866 for f in mp2:
855 867 if f not in mc:
856 868 files.add(f)
857 869 for f in mc:
858 870 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
859 871 files.add(f)
860 872 toupload = toupload.union(
861 873 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
862 874 return toupload
863 875
864 876 def override_outgoing(orig, ui, repo, dest=None, **opts):
865 877 orig(ui, repo, dest, **opts)
866 878
867 879 if opts.pop('large', None):
868 880 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
869 881 if toupload is None:
870 882 ui.status(_('largefiles: No remote repo\n'))
871 883 else:
872 884 ui.status(_('largefiles to upload:\n'))
873 885 for file in toupload:
874 886 ui.status(lfutil.splitstandin(file) + '\n')
875 887 ui.status('\n')
876 888
877 889 def override_summary(orig, ui, repo, *pats, **opts):
878 890 try:
879 891 repo.lfstatus = True
880 892 orig(ui, repo, *pats, **opts)
881 893 finally:
882 894 repo.lfstatus = False
883 895
884 896 if opts.pop('large', None):
885 897 toupload = getoutgoinglfiles(ui, repo, None, **opts)
886 898 if toupload is None:
887 899 ui.status(_('largefiles: No remote repo\n'))
888 900 else:
889 901 ui.status(_('largefiles: %d to upload\n') % len(toupload))
890 902
891 903 def override_addremove(orig, ui, repo, *pats, **opts):
892 904 # Get the list of missing largefiles so we can remove them
893 905 lfdirstate = lfutil.openlfdirstate(ui, repo)
894 906 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
895 907 False, False)
896 908 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
897 909
898 910 # Call into the normal remove code, but leave the removal of the standins
899 911 # themselves to the original addremove. Monkey patching here makes sure
900 912 # we don't remove the standins in the largefiles code, preventing a very
901 913 # confused state later.
902 914 if missing:
903 915 repo._isaddremove = True
904 916 remove_largefiles(ui, repo, *missing, **opts)
905 917 repo._isaddremove = False
906 918 # Call into the normal add code, and any files that *should* be added as
907 919 # largefiles will be
908 920 add_largefiles(ui, repo, *pats, **opts)
909 921 # Now that we've handled largefiles, hand off to the original addremove
910 922 # function to take care of the rest. Make sure it doesn't do anything with
911 923 # largefiles by installing a matcher that will ignore them.
912 924 installnormalfilesmatchfn(repo[None].manifest())
913 925 result = orig(ui, repo, *pats, **opts)
914 926 restorematchfn()
915 927 return result
916 928
917 929 # Calling purge with --all will cause the largefiles to be deleted.
918 930 # Override repo.status to prevent this from happening.
919 931 def override_purge(orig, ui, repo, *dirs, **opts):
920 932 oldstatus = repo.status
921 933 def override_status(node1='.', node2=None, match=None, ignored=False,
922 934 clean=False, unknown=False, listsubrepos=False):
923 935 r = oldstatus(node1, node2, match, ignored, clean, unknown,
924 936 listsubrepos)
925 937 lfdirstate = lfutil.openlfdirstate(ui, repo)
926 938 modified, added, removed, deleted, unknown, ignored, clean = r
927 939 unknown = [f for f in unknown if lfdirstate[f] == '?']
928 940 ignored = [f for f in ignored if lfdirstate[f] == '?']
929 941 return modified, added, removed, deleted, unknown, ignored, clean
930 942 repo.status = override_status
931 943 orig(ui, repo, *dirs, **opts)
932 944 repo.status = oldstatus
933 945
934 946 def override_rollback(orig, ui, repo, **opts):
935 947 result = orig(ui, repo, **opts)
936 948 merge.update(repo, node=None, branchmerge=False, force=True,
937 949 partial=lfutil.isstandin)
938 950 wlock = repo.wlock()
939 951 try:
940 952 lfdirstate = lfutil.openlfdirstate(ui, repo)
941 953 lfiles = lfutil.listlfiles(repo)
942 954 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
943 955 for file in lfiles:
944 956 if file in oldlfiles:
945 957 lfdirstate.normallookup(file)
946 958 else:
947 959 lfdirstate.add(file)
948 960 lfdirstate.write()
949 961 finally:
950 962 wlock.release()
951 963 return result
952 964
953 965 def override_transplant(orig, ui, repo, *revs, **opts):
954 966 try:
955 967 repo._istransplanting = True
956 968 result = orig(ui, repo, *revs, **opts)
957 969 lfcommands.updatelfiles(ui, repo, filelist=None,
958 970 printmessage=False)
959 971 finally:
960 972 repo._istransplanting = False
961 973 return result