##// END OF EJS Templates
largefiles: fix inappropriate locking (issue3182)...
Levi Bard -
r15794:0d91211d default
parent child Browse files
Show More
@@ -1,462 +1,450 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16 import tempfile
17 17
18 18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 19 from mercurial.i18n import _
20 20
21 21 shortname = '.hglf'
22 22 longname = 'largefiles'
23 23
24 24
25 25 # -- Portability wrappers ----------------------------------------------
26 26
27 27 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
28 28 return dirstate.walk(matcher, [], unknown, ignored)
29 29
30 30 def repo_add(repo, list):
31 31 add = repo[None].add
32 32 return add(list)
33 33
34 34 def repo_remove(repo, list, unlink=False):
35 35 def remove(list, unlink):
36 36 wlock = repo.wlock()
37 37 try:
38 38 if unlink:
39 39 for f in list:
40 40 try:
41 41 util.unlinkpath(repo.wjoin(f))
42 42 except OSError, inst:
43 43 if inst.errno != errno.ENOENT:
44 44 raise
45 45 repo[None].forget(list)
46 46 finally:
47 47 wlock.release()
48 48 return remove(list, unlink=unlink)
49 49
50 50 def repo_forget(repo, list):
51 51 forget = repo[None].forget
52 52 return forget(list)
53 53
54 54 def findoutgoing(repo, remote, force):
55 55 from mercurial import discovery
56 56 common, _anyinc, _heads = discovery.findcommonincoming(repo,
57 57 remote, force=force)
58 58 return repo.changelog.findmissing(common)
59 59
60 60 # -- Private worker functions ------------------------------------------
61 61
62 62 def getminsize(ui, assumelfiles, opt, default=10):
63 63 lfsize = opt
64 64 if not lfsize and assumelfiles:
65 65 lfsize = ui.config(longname, 'minsize', default=default)
66 66 if lfsize:
67 67 try:
68 68 lfsize = float(lfsize)
69 69 except ValueError:
70 70 raise util.Abort(_('largefiles: size must be number (not %s)\n')
71 71 % lfsize)
72 72 if lfsize is None:
73 73 raise util.Abort(_('minimum size for largefiles must be specified'))
74 74 return lfsize
75 75
76 76 def link(src, dest):
77 77 try:
78 78 util.oslink(src, dest)
79 79 except OSError:
80 80 # if hardlinks fail, fallback on atomic copy
81 81 dst = util.atomictempfile(dest)
82 82 for chunk in util.filechunkiter(open(src, 'rb')):
83 83 dst.write(chunk)
84 84 dst.close()
85 85 os.chmod(dest, os.stat(src).st_mode)
86 86
87 87 def usercachepath(ui, hash):
88 88 path = ui.configpath(longname, 'usercache', None)
89 89 if path:
90 90 path = os.path.join(path, hash)
91 91 else:
92 92 if os.name == 'nt':
93 93 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
94 94 if appdata:
95 95 path = os.path.join(appdata, longname, hash)
96 96 elif platform.system() == 'Darwin':
97 97 home = os.getenv('HOME')
98 98 if home:
99 99 path = os.path.join(home, 'Library', 'Caches',
100 100 longname, hash)
101 101 elif os.name == 'posix':
102 102 path = os.getenv('XDG_CACHE_HOME')
103 103 if path:
104 104 path = os.path.join(path, longname, hash)
105 105 else:
106 106 home = os.getenv('HOME')
107 107 if home:
108 108 path = os.path.join(home, '.cache', longname, hash)
109 109 else:
110 110 raise util.Abort(_('unknown operating system: %s\n') % os.name)
111 111 return path
112 112
113 113 def inusercache(ui, hash):
114 114 path = usercachepath(ui, hash)
115 115 return path and os.path.exists(path)
116 116
117 117 def findfile(repo, hash):
118 118 if instore(repo, hash):
119 119 repo.ui.note(_('Found %s in store\n') % hash)
120 120 elif inusercache(repo.ui, hash):
121 121 repo.ui.note(_('Found %s in system cache\n') % hash)
122 122 path = storepath(repo, hash)
123 123 util.makedirs(os.path.dirname(path))
124 124 link(usercachepath(repo.ui, hash), path)
125 125 else:
126 126 return None
127 127 return storepath(repo, hash)
128 128
129 129 class largefiles_dirstate(dirstate.dirstate):
130 130 def __getitem__(self, key):
131 131 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
132 132 def normal(self, f):
133 133 return super(largefiles_dirstate, self).normal(unixpath(f))
134 134 def remove(self, f):
135 135 return super(largefiles_dirstate, self).remove(unixpath(f))
136 136 def add(self, f):
137 137 return super(largefiles_dirstate, self).add(unixpath(f))
138 138 def drop(self, f):
139 139 return super(largefiles_dirstate, self).drop(unixpath(f))
140 140 def forget(self, f):
141 141 return super(largefiles_dirstate, self).forget(unixpath(f))
142 142 def normallookup(self, f):
143 143 return super(largefiles_dirstate, self).normallookup(unixpath(f))
144 144
145 145 def openlfdirstate(ui, repo):
146 146 '''
147 147 Return a dirstate object that tracks largefiles: i.e. its root is
148 148 the repo root, but it is saved in .hg/largefiles/dirstate.
149 149 '''
150 150 admin = repo.join(longname)
151 151 opener = scmutil.opener(admin)
152 152 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
153 153 repo.dirstate._validate)
154 154
155 155 # If the largefiles dirstate does not exist, populate and create
156 156 # it. This ensures that we create it on the first meaningful
157 # largefiles operation in a new clone. It also gives us an easy
158 # way to forcibly rebuild largefiles state:
159 # rm .hg/largefiles/dirstate && hg status
160 # Or even, if things are really messed up:
161 # rm -rf .hg/largefiles && hg status
157 # largefiles operation in a new clone.
162 158 if not os.path.exists(os.path.join(admin, 'dirstate')):
163 159 util.makedirs(admin)
164 160 matcher = getstandinmatcher(repo)
165 161 for standin in dirstate_walk(repo.dirstate, matcher):
166 162 lfile = splitstandin(standin)
167 163 hash = readstandin(repo, lfile)
168 164 lfdirstate.normallookup(lfile)
169 165 try:
170 166 if hash == hashfile(repo.wjoin(lfile)):
171 167 lfdirstate.normal(lfile)
172 168 except OSError, err:
173 169 if err.errno != errno.ENOENT:
174 170 raise
175
176 lfdirstate.write()
177
178 171 return lfdirstate
179 172
180 173 def lfdirstate_status(lfdirstate, repo, rev):
181 wlock = repo.wlock()
182 try:
183 match = match_.always(repo.root, repo.getcwd())
184 s = lfdirstate.status(match, [], False, False, False)
185 unsure, modified, added, removed, missing, unknown, ignored, clean = s
186 for lfile in unsure:
187 if repo[rev][standin(lfile)].data().strip() != \
188 hashfile(repo.wjoin(lfile)):
189 modified.append(lfile)
190 else:
191 clean.append(lfile)
192 lfdirstate.normal(lfile)
193 lfdirstate.write()
194 finally:
195 wlock.release()
174 match = match_.always(repo.root, repo.getcwd())
175 s = lfdirstate.status(match, [], False, False, False)
176 unsure, modified, added, removed, missing, unknown, ignored, clean = s
177 for lfile in unsure:
178 if repo[rev][standin(lfile)].data().strip() != \
179 hashfile(repo.wjoin(lfile)):
180 modified.append(lfile)
181 else:
182 clean.append(lfile)
183 lfdirstate.normal(lfile)
196 184 return (modified, added, removed, missing, unknown, ignored, clean)
197 185
198 186 def listlfiles(repo, rev=None, matcher=None):
199 187 '''return a list of largefiles in the working copy or the
200 188 specified changeset'''
201 189
202 190 if matcher is None:
203 191 matcher = getstandinmatcher(repo)
204 192
205 193 # ignore unknown files in working directory
206 194 return [splitstandin(f)
207 195 for f in repo[rev].walk(matcher)
208 196 if rev is not None or repo.dirstate[f] != '?']
209 197
210 198 def instore(repo, hash):
211 199 return os.path.exists(storepath(repo, hash))
212 200
213 201 def storepath(repo, hash):
214 202 return repo.join(os.path.join(longname, hash))
215 203
216 204 def copyfromcache(repo, hash, filename):
217 205 '''Copy the specified largefile from the repo or system cache to
218 206 filename in the repository. Return true on success or false if the
219 207 file was not found in either cache (which should not happened:
220 208 this is meant to be called only after ensuring that the needed
221 209 largefile exists in the cache).'''
222 210 path = findfile(repo, hash)
223 211 if path is None:
224 212 return False
225 213 util.makedirs(os.path.dirname(repo.wjoin(filename)))
226 214 # The write may fail before the file is fully written, but we
227 215 # don't use atomic writes in the working copy.
228 216 shutil.copy(path, repo.wjoin(filename))
229 217 return True
230 218
231 219 def copytostore(repo, rev, file, uploaded=False):
232 220 hash = readstandin(repo, file)
233 221 if instore(repo, hash):
234 222 return
235 223 copytostoreabsolute(repo, repo.wjoin(file), hash)
236 224
237 225 def copytostoreabsolute(repo, file, hash):
238 226 util.makedirs(os.path.dirname(storepath(repo, hash)))
239 227 if inusercache(repo.ui, hash):
240 228 link(usercachepath(repo.ui, hash), storepath(repo, hash))
241 229 else:
242 230 dst = util.atomictempfile(storepath(repo, hash))
243 231 for chunk in util.filechunkiter(open(file, 'rb')):
244 232 dst.write(chunk)
245 233 dst.close()
246 234 util.copymode(file, storepath(repo, hash))
247 235 linktousercache(repo, hash)
248 236
249 237 def linktousercache(repo, hash):
250 238 path = usercachepath(repo.ui, hash)
251 239 if path:
252 240 util.makedirs(os.path.dirname(path))
253 241 link(storepath(repo, hash), path)
254 242
255 243 def getstandinmatcher(repo, pats=[], opts={}):
256 244 '''Return a match object that applies pats to the standin directory'''
257 245 standindir = repo.pathto(shortname)
258 246 if pats:
259 247 # patterns supplied: search standin directory relative to current dir
260 248 cwd = repo.getcwd()
261 249 if os.path.isabs(cwd):
262 250 # cwd is an absolute path for hg -R <reponame>
263 251 # work relative to the repository root in this case
264 252 cwd = ''
265 253 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
266 254 elif os.path.isdir(standindir):
267 255 # no patterns: relative to repo root
268 256 pats = [standindir]
269 257 else:
270 258 # no patterns and no standin dir: return matcher that matches nothing
271 259 match = match_.match(repo.root, None, [], exact=True)
272 260 match.matchfn = lambda f: False
273 261 return match
274 262 return getmatcher(repo, pats, opts, showbad=False)
275 263
276 264 def getmatcher(repo, pats=[], opts={}, showbad=True):
277 265 '''Wrapper around scmutil.match() that adds showbad: if false,
278 266 neuter the match object's bad() method so it does not print any
279 267 warnings about missing files or directories.'''
280 268 match = scmutil.match(repo[None], pats, opts)
281 269
282 270 if not showbad:
283 271 match.bad = lambda f, msg: None
284 272 return match
285 273
286 274 def composestandinmatcher(repo, rmatcher):
287 275 '''Return a matcher that accepts standins corresponding to the
288 276 files accepted by rmatcher. Pass the list of files in the matcher
289 277 as the paths specified by the user.'''
290 278 smatcher = getstandinmatcher(repo, rmatcher.files())
291 279 isstandin = smatcher.matchfn
292 280 def composed_matchfn(f):
293 281 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
294 282 smatcher.matchfn = composed_matchfn
295 283
296 284 return smatcher
297 285
298 286 def standin(filename):
299 287 '''Return the repo-relative path to the standin for the specified big
300 288 file.'''
301 289 # Notes:
302 290 # 1) Most callers want an absolute path, but _create_standin() needs
303 291 # it repo-relative so lfadd() can pass it to repo_add(). So leave
304 292 # it up to the caller to use repo.wjoin() to get an absolute path.
305 293 # 2) Join with '/' because that's what dirstate always uses, even on
306 294 # Windows. Change existing separator to '/' first in case we are
307 295 # passed filenames from an external source (like the command line).
308 296 return shortname + '/' + filename.replace(os.sep, '/')
309 297
310 298 def isstandin(filename):
311 299 '''Return true if filename is a big file standin. filename must be
312 300 in Mercurial's internal form (slash-separated).'''
313 301 return filename.startswith(shortname + '/')
314 302
315 303 def splitstandin(filename):
316 304 # Split on / because that's what dirstate always uses, even on Windows.
317 305 # Change local separator to / first just in case we are passed filenames
318 306 # from an external source (like the command line).
319 307 bits = filename.replace(os.sep, '/').split('/', 1)
320 308 if len(bits) == 2 and bits[0] == shortname:
321 309 return bits[1]
322 310 else:
323 311 return None
324 312
325 313 def updatestandin(repo, standin):
326 314 file = repo.wjoin(splitstandin(standin))
327 315 if os.path.exists(file):
328 316 hash = hashfile(file)
329 317 executable = getexecutable(file)
330 318 writestandin(repo, standin, hash, executable)
331 319
332 320 def readstandin(repo, filename, node=None):
333 321 '''read hex hash from standin for filename at given node, or working
334 322 directory if no node is given'''
335 323 return repo[node][standin(filename)].data().strip()
336 324
337 325 def writestandin(repo, standin, hash, executable):
338 326 '''write hash to <repo.root>/<standin>'''
339 327 writehash(hash, repo.wjoin(standin), executable)
340 328
341 329 def copyandhash(instream, outfile):
342 330 '''Read bytes from instream (iterable) and write them to outfile,
343 331 computing the SHA-1 hash of the data along the way. Close outfile
344 332 when done and return the binary hash.'''
345 333 hasher = util.sha1('')
346 334 for data in instream:
347 335 hasher.update(data)
348 336 outfile.write(data)
349 337
350 338 # Blecch: closing a file that somebody else opened is rude and
351 339 # wrong. But it's so darn convenient and practical! After all,
352 340 # outfile was opened just to copy and hash.
353 341 outfile.close()
354 342
355 343 return hasher.digest()
356 344
357 345 def hashrepofile(repo, file):
358 346 return hashfile(repo.wjoin(file))
359 347
360 348 def hashfile(file):
361 349 if not os.path.exists(file):
362 350 return ''
363 351 hasher = util.sha1('')
364 352 fd = open(file, 'rb')
365 353 for data in blockstream(fd):
366 354 hasher.update(data)
367 355 fd.close()
368 356 return hasher.hexdigest()
369 357
370 358 class limitreader(object):
371 359 def __init__(self, f, limit):
372 360 self.f = f
373 361 self.limit = limit
374 362
375 363 def read(self, length):
376 364 if self.limit == 0:
377 365 return ''
378 366 length = length > self.limit and self.limit or length
379 367 self.limit -= length
380 368 return self.f.read(length)
381 369
382 370 def close(self):
383 371 pass
384 372
385 373 def blockstream(infile, blocksize=128 * 1024):
386 374 """Generator that yields blocks of data from infile and closes infile."""
387 375 while True:
388 376 data = infile.read(blocksize)
389 377 if not data:
390 378 break
391 379 yield data
392 380 # same blecch as copyandhash() above
393 381 infile.close()
394 382
395 383 def readhash(filename):
396 384 rfile = open(filename, 'rb')
397 385 hash = rfile.read(40)
398 386 rfile.close()
399 387 if len(hash) < 40:
400 388 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
401 389 % (filename, len(hash)))
402 390 return hash
403 391
404 392 def writehash(hash, filename, executable):
405 393 util.makedirs(os.path.dirname(filename))
406 394 util.writefile(filename, hash + '\n')
407 395 os.chmod(filename, getmode(executable))
408 396
409 397 def getexecutable(filename):
410 398 mode = os.stat(filename).st_mode
411 399 return ((mode & stat.S_IXUSR) and
412 400 (mode & stat.S_IXGRP) and
413 401 (mode & stat.S_IXOTH))
414 402
415 403 def getmode(executable):
416 404 if executable:
417 405 return 0755
418 406 else:
419 407 return 0644
420 408
421 409 def urljoin(first, second, *arg):
422 410 def join(left, right):
423 411 if not left.endswith('/'):
424 412 left += '/'
425 413 if right.startswith('/'):
426 414 right = right[1:]
427 415 return left + right
428 416
429 417 url = join(first, second)
430 418 for a in arg:
431 419 url = join(url, a)
432 420 return url
433 421
434 422 def hexsha1(data):
435 423 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
436 424 object data"""
437 425 h = util.sha1()
438 426 for chunk in util.filechunkiter(data):
439 427 h.update(chunk)
440 428 return h.hexdigest()
441 429
442 430 def httpsendfile(ui, filename):
443 431 return httpconnection.httpsendfile(ui, filename, 'rb')
444 432
445 433 def unixpath(path):
446 434 '''Return a version of path normalized for use with the lfdirstate.'''
447 435 return os.path.normpath(path).replace(os.sep, '/')
448 436
449 437 def islfilesrepo(repo):
450 438 return ('largefiles' in repo.requirements and
451 439 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
452 440
453 441 def mkstemp(repo, prefix):
454 442 '''Returns a file descriptor and a filename corresponding to a temporary
455 443 file in the repo's largefiles store.'''
456 444 path = repo.join(longname)
457 445 util.makedirs(path)
458 446 return tempfile.mkstemp(prefix=prefix, dir=path)
459 447
460 448 class storeprotonotcapable(Exception):
461 449 def __init__(self, storetypes):
462 450 self.storetypes = storetypes
@@ -1,927 +1,931 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 27 largefiles'''
28 28 oldmatch = None # for the closure
29 29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 30 default='relpath'):
31 31 match = oldmatch(ctx, pats, opts, globbed, default)
32 32 m = copy.copy(match)
33 33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 34 manifest)
35 35 m._files = filter(notlfile, m._files)
36 36 m._fmap = set(m._files)
37 37 orig_matchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(override_match)
41 41
42 42 def installmatchfn(f):
43 43 oldmatch = scmutil.match
44 44 setattr(f, 'oldmatch', oldmatch)
45 45 scmutil.match = f
46 46 return oldmatch
47 47
48 48 def restorematchfn():
49 49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 50 was called. no-op if scmutil.match is its original function.
51 51
52 52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 53 restore matchfn to reverse'''
54 54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55 55
56 56 def add_largefiles(ui, repo, *pats, **opts):
57 57 large = opts.pop('large', None)
58 58 lfsize = lfutil.getminsize(
59 59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60 60
61 61 lfmatcher = None
62 62 if lfutil.islfilesrepo(repo):
63 63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 64 if lfpats:
65 65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66 66
67 67 lfnames = []
68 68 m = scmutil.match(repo[None], pats, opts)
69 69 m.bad = lambda x, y: None
70 70 wctx = repo[None]
71 71 for f in repo.walk(m):
72 72 exact = m.exact(f)
73 73 lfile = lfutil.standin(f) in wctx
74 74 nfile = f in wctx
75 75 exists = lfile or nfile
76 76
77 77 # Don't warn the user when they attempt to add a normal tracked file.
78 78 # The normal add code will do that for us.
79 79 if exact and exists:
80 80 if lfile:
81 81 ui.warn(_('%s already a largefile\n') % f)
82 82 continue
83 83
84 84 if exact or not exists:
85 85 abovemin = (lfsize and
86 86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 88 lfnames.append(f)
89 89 if ui.verbose or not exact:
90 90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91 91
92 92 bad = []
93 93 standins = []
94 94
95 95 # Need to lock, otherwise there could be a race condition between
96 96 # when standins are created and added to the repo.
97 97 wlock = repo.wlock()
98 98 try:
99 99 if not opts.get('dry_run'):
100 100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 101 for f in lfnames:
102 102 standinname = lfutil.standin(f)
103 103 lfutil.writestandin(repo, standinname, hash='',
104 104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 105 standins.append(standinname)
106 106 if lfdirstate[f] == 'r':
107 107 lfdirstate.normallookup(f)
108 108 else:
109 109 lfdirstate.add(f)
110 110 lfdirstate.write()
111 111 bad += [lfutil.splitstandin(f)
112 112 for f in lfutil.repo_add(repo, standins)
113 113 if f in m.files()]
114 114 finally:
115 115 wlock.release()
116 116 return bad
117 117
118 118 def remove_largefiles(ui, repo, *pats, **opts):
119 119 after = opts.get('after')
120 120 if not pats and not after:
121 121 raise util.Abort(_('no files specified'))
122 122 m = scmutil.match(repo[None], pats, opts)
123 123 try:
124 124 repo.lfstatus = True
125 125 s = repo.status(match=m, clean=True)
126 126 finally:
127 127 repo.lfstatus = False
128 128 manifest = repo[None].manifest()
129 129 modified, added, deleted, clean = [[f for f in list
130 130 if lfutil.standin(f) in manifest]
131 131 for list in [s[0], s[1], s[3], s[6]]]
132 132
133 133 def warn(files, reason):
134 134 for f in files:
135 135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 136 % (m.rel(f), reason))
137 137
138 138 if after:
139 139 remove, forget = deleted, []
140 140 warn(modified + added + clean, _('file still exists'))
141 141 else:
142 142 remove, forget = deleted + clean, []
143 143 warn(modified, _('file is modified'))
144 144 warn(added, _('file has been marked for add'))
145 145
146 146 for f in sorted(remove + forget):
147 147 if ui.verbose or not m.exact(f):
148 148 ui.status(_('removing %s\n') % m.rel(f))
149 149
150 150 # Need to lock because standin files are deleted then removed from the
151 151 # repository and we could race inbetween.
152 152 wlock = repo.wlock()
153 153 try:
154 154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 155 for f in remove:
156 156 if not after:
157 157 # If this is being called by addremove, notify the user that we
158 158 # are removing the file.
159 159 if getattr(repo, "_isaddremove", False):
160 160 ui.status(_('removing %s\n' % f))
161 161 if os.path.exists(repo.wjoin(f)):
162 162 os.unlink(repo.wjoin(f))
163 163 currentdir = os.path.split(f)[0]
164 164 while currentdir and not os.listdir(repo.wjoin(currentdir)):
165 165 os.rmdir(repo.wjoin(currentdir))
166 166 currentdir = os.path.split(currentdir)[0]
167 167 lfdirstate.remove(f)
168 168 lfdirstate.write()
169 169 forget = [lfutil.standin(f) for f in forget]
170 170 remove = [lfutil.standin(f) for f in remove]
171 171 lfutil.repo_forget(repo, forget)
172 172 # If this is being called by addremove, let the original addremove
173 173 # function handle this.
174 174 if not getattr(repo, "_isaddremove", False):
175 175 lfutil.repo_remove(repo, remove, unlink=True)
176 176 finally:
177 177 wlock.release()
178 178
179 179 # -- Wrappers: modify existing commands --------------------------------
180 180
181 181 # Add works by going through the files that the user wanted to add and
182 182 # checking if they should be added as largefiles. Then it makes a new
183 183 # matcher which matches only the normal files and runs the original
184 184 # version of add.
185 185 def override_add(orig, ui, repo, *pats, **opts):
186 186 bad = add_largefiles(ui, repo, *pats, **opts)
187 187 installnormalfilesmatchfn(repo[None].manifest())
188 188 result = orig(ui, repo, *pats, **opts)
189 189 restorematchfn()
190 190
191 191 return (result == 1 or bad) and 1 or 0
192 192
193 193 def override_remove(orig, ui, repo, *pats, **opts):
194 194 installnormalfilesmatchfn(repo[None].manifest())
195 195 orig(ui, repo, *pats, **opts)
196 196 restorematchfn()
197 197 remove_largefiles(ui, repo, *pats, **opts)
198 198
199 199 def override_status(orig, ui, repo, *pats, **opts):
200 200 try:
201 201 repo.lfstatus = True
202 202 return orig(ui, repo, *pats, **opts)
203 203 finally:
204 204 repo.lfstatus = False
205 205
206 206 def override_log(orig, ui, repo, *pats, **opts):
207 207 try:
208 208 repo.lfstatus = True
209 209 orig(ui, repo, *pats, **opts)
210 210 finally:
211 211 repo.lfstatus = False
212 212
213 213 def override_verify(orig, ui, repo, *pats, **opts):
214 214 large = opts.pop('large', False)
215 215 all = opts.pop('lfa', False)
216 216 contents = opts.pop('lfc', False)
217 217
218 218 result = orig(ui, repo, *pats, **opts)
219 219 if large:
220 220 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
221 221 return result
222 222
223 223 # Override needs to refresh standins so that update's normal merge
224 224 # will go through properly. Then the other update hook (overriding repo.update)
225 225 # will get the new files. Filemerge is also overriden so that the merge
226 226 # will merge standins correctly.
227 227 def override_update(orig, ui, repo, *pats, **opts):
228 228 lfdirstate = lfutil.openlfdirstate(ui, repo)
229 229 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
230 230 False, False)
231 231 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
232 232
233 233 # Need to lock between the standins getting updated and their
234 234 # largefiles getting updated
235 235 wlock = repo.wlock()
236 236 try:
237 237 if opts['check']:
238 238 mod = len(modified) > 0
239 239 for lfile in unsure:
240 240 standin = lfutil.standin(lfile)
241 241 if repo['.'][standin].data().strip() != \
242 242 lfutil.hashfile(repo.wjoin(lfile)):
243 243 mod = True
244 244 else:
245 245 lfdirstate.normal(lfile)
246 246 lfdirstate.write()
247 247 if mod:
248 248 raise util.Abort(_('uncommitted local changes'))
249 249 # XXX handle removed differently
250 250 if not opts['clean']:
251 251 for lfile in unsure + modified + added:
252 252 lfutil.updatestandin(repo, lfutil.standin(lfile))
253 253 finally:
254 254 wlock.release()
255 255 return orig(ui, repo, *pats, **opts)
256 256
257 257 # Before starting the manifest merge, merge.updates will call
258 258 # _checkunknown to check if there are any files in the merged-in
259 259 # changeset that collide with unknown files in the working copy.
260 260 #
261 261 # The largefiles are seen as unknown, so this prevents us from merging
262 262 # in a file 'foo' if we already have a largefile with the same name.
263 263 #
264 264 # The overridden function filters the unknown files by removing any
265 265 # largefiles. This makes the merge proceed and we can then handle this
266 266 # case further in the overridden manifestmerge function below.
267 267 def override_checkunknown(origfn, wctx, mctx, folding):
268 268 origunknown = wctx.unknown()
269 269 wctx._unknown = filter(lambda f: lfutil.standin(f) not in wctx, origunknown)
270 270 try:
271 271 return origfn(wctx, mctx, folding)
272 272 finally:
273 273 wctx._unknown = origunknown
274 274
275 275 # The manifest merge handles conflicts on the manifest level. We want
276 276 # to handle changes in largefile-ness of files at this level too.
277 277 #
278 278 # The strategy is to run the original manifestmerge and then process
279 279 # the action list it outputs. There are two cases we need to deal with:
280 280 #
281 281 # 1. Normal file in p1, largefile in p2. Here the largefile is
282 282 # detected via its standin file, which will enter the working copy
283 283 # with a "get" action. It is not "merge" since the standin is all
284 284 # Mercurial is concerned with at this level -- the link to the
285 285 # existing normal file is not relevant here.
286 286 #
287 287 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
288 288 # since the largefile will be present in the working copy and
289 289 # different from the normal file in p2. Mercurial therefore
290 290 # triggers a merge action.
291 291 #
292 292 # In both cases, we prompt the user and emit new actions to either
293 293 # remove the standin (if the normal file was kept) or to remove the
294 294 # normal file and get the standin (if the largefile was kept). The
295 295 # default prompt answer is to use the largefile version since it was
296 296 # presumably changed on purpose.
297 297 #
298 298 # Finally, the merge.applyupdates function will then take care of
299 299 # writing the files into the working copy and lfcommands.updatelfiles
300 300 # will update the largefiles.
301 301 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
302 302 actions = origfn(repo, p1, p2, pa, overwrite, partial)
303 303 processed = []
304 304
305 305 for action in actions:
306 306 if overwrite:
307 307 processed.append(action)
308 308 continue
309 309 f, m = action[:2]
310 310
311 311 choices = (_('&Largefile'), _('&Normal file'))
312 312 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
313 313 # Case 1: normal file in the working copy, largefile in
314 314 # the second parent
315 315 lfile = lfutil.splitstandin(f)
316 316 standin = f
317 317 msg = _('%s has been turned into a largefile\n'
318 318 'use (l)argefile or keep as (n)ormal file?') % lfile
319 319 if repo.ui.promptchoice(msg, choices, 0) == 0:
320 320 processed.append((lfile, "r"))
321 321 processed.append((standin, "g", p2.flags(standin)))
322 322 else:
323 323 processed.append((standin, "r"))
324 324 elif m == "m" and lfutil.standin(f) in p1 and f in p2:
325 325 # Case 2: largefile in the working copy, normal file in
326 326 # the second parent
327 327 standin = lfutil.standin(f)
328 328 lfile = f
329 329 msg = _('%s has been turned into a normal file\n'
330 330 'keep as (l)argefile or use (n)ormal file?') % lfile
331 331 if repo.ui.promptchoice(msg, choices, 0) == 0:
332 332 processed.append((lfile, "r"))
333 333 else:
334 334 processed.append((standin, "r"))
335 335 processed.append((lfile, "g", p2.flags(lfile)))
336 336 else:
337 337 processed.append(action)
338 338
339 339 return processed
340 340
341 341 # Override filemerge to prompt the user about how they wish to merge
342 342 # largefiles. This will handle identical edits, and copy/rename +
343 343 # edit without prompting the user.
def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    '''Merge a single largefile standin, prompting the user only on a
    real conflict instead of handing the hash-only standin contents to
    the normal merge machinery.

    Wrapper around filemerge.filemerge, so the positional signature must
    stay exactly as the wrapped function's.  Returns None for "nothing
    to do" and 0 for success, like the function it wraps.
    '''
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        # not a largefile standin: defer entirely to the original filemerge
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            # copy/rename + edit: report all three names involved
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))

        # if only one side changed relative to the ancestor, take the
        # changed side without prompting
        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        # both sides changed: let the user pick a side (default: local)
        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                             'keep (l)ocal or take (o)ther?') %
                             lfutil.splitstandin(orig),
                             (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
383 383
384 384 # Copy first changes the matchers to match standins instead of
385 385 # largefiles. Then it overrides util.copyfile in that function it
386 386 # checks if the destination largefile already exists. It also keeps a
387 387 # list of copied files so that the largefiles can be copied and the
388 388 # dirstate updated.
def override_copy(orig, ui, repo, pats, opts, rename=False):
    '''Copy/rename with largefile awareness.

    Runs the original copy twice: once with a matcher restricted to
    normal files, then once with the patterns rewritten to standins and
    util.copyfile intercepted, so the copied largefiles can be recorded
    and the largefile dirstate updated afterwards.
    '''
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # absolute working-dir path of the standin for a given pattern
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" is tolerated here: the patterns may match
            # largefiles only; remember it so we can re-raise at the end
            # if the largefile pass found nothing either
            if str(e) != 'no files to copy':
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add them
            # to the dir state until later so lock during that time.
            # NOTE(review): wlock is taken inside the try, so if wlock()
            # itself raised, the finally's wlock.release() would hit an
            # unbound name -- confirm whether that can happen here.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                # only match standins whose largefile would have matched
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                    lfile(lfutil.splitstandin(f)) and
                                    orig_matchfn(lfutil.splitstandin(f)) or
                                    None)
                return m
            oldmatch = installmatchfn(override_match)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def override_copyfile(src, dest):
                    # refuse to clobber an existing largefile unless --force,
                    # and record every copy so the dirstate can be fixed up
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = override_copyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # translate standin paths back to largefile paths
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(destlfile) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(srclfile, destlfile)
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != 'no files to copy':
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # neither pass matched anything: surface the original abort
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
515 515
516 516 # When the user calls revert, we have to be careful to not revert any
517 517 # changes to other largefiles accidentally. This means we have to keep
518 518 # track of the largefiles that are being reverted so we only pull down
519 519 # the necessary largefiles.
520 520 #
521 521 # Standins are only updated (to match the hash of largefiles) before
522 522 # commits. Update the standins then run the original revert, changing
523 523 # the matcher to hit standins instead of largefiles. Based on the
524 524 # resulting standins update the largefiles. Then return the standins
525 525 # to their proper state
def override_revert(orig, ui, repo, *pats, **opts):
    '''Revert with largefile awareness.

    Standins only reflect largefile hashes at commit time, so refresh
    them first, run the original revert against the standins, then put
    the touched largefiles and standins back into a consistent state.

    Fixes vs. previous version: drops a duplicated membership test in
    tostandin() (the same "standin in ctx" check appeared twice, the
    second operand could never change the result) and removes a stray
    no-op expression statement ("scmutil.match" evaluated and discarded).
    '''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
        # write current largefile hashes into the standins before reverting
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))

        try:
            ctx = repo[opts.get('rev')]
            oldmatch = None # for the closure
            def override_match(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # map each largefile name to its standin; drop names
                    # whose standin only exists in the working copy
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                orig_matchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if orig_matchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return orig_matchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(override_match)
            matches = override_match(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
            lfileslist = getattr(repo, '_lfilestoupdate', [])
            lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                    printmessage=False)

            # empty out the largefiles list so we start fresh next time
            repo._lfilestoupdate = []
            # put standins of still-committed largefiles back to their
            # proper (hash) contents
            for lfile in modified:
                if lfile in lfileslist:
                    if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                            in repo['.']:
                        lfutil.writestandin(repo, lfutil.standin(lfile),
                            repo['.'][lfile].data().strip(),
                            'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # largefiles added in the working copy but reverted away: drop
        # them from the largefile dirstate and delete their standins
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
603 603
def hg_update(orig, repo, node):
    # run the wrapped update, then bring the largefiles in the working
    # directory in sync with their (possibly changed) standins
    res = orig(repo, node)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
608 608
def hg_clean(orig, repo, node, show_stats=True):
    # run the wrapped clean, then refresh largefiles in the working copy
    res = orig(repo, node, show_stats)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
613 613
def hg_merge(orig, repo, node, force=None, remind=True):
    # run the wrapped merge, then refresh largefiles in the working copy
    res = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
618 618
619 619 # When we rebase a repository with remotely changed largefiles, we need to
620 620 # take some extra care so that the largefiles are correctly updated in the
621 621 # working copy
def override_pull(orig, ui, repo, source=None, **opts):
    '''Pull with largefile awareness.

    With --rebase, sets repo._isrebasing while pulling and rebasing so
    that remotely changed largefiles are pulled down before they can be
    clobbered (commit() checks this flag), and suppresses the normal
    post-pull working copy update since the rebase handles it.
    '''
    if opts.get('rebase', False):
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # remember the repo length so we can tell below whether the
            # pull actually brought in changesets that need rebasing
            revsprepull = len(repo)
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                # suppress the automatic post-pull update of the working
                # copy; the rebase call below takes care of it
                pass
            commands.postincoming = _dummy
            # record the pull source -- presumably consumed later when the
            # largefiles themselves are fetched; confirm against lfutil
            repo.lfpullsource = source
            if not source:
                source = 'default'
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        repo.lfpullsource = source
        if not source:
            source = 'default'
        result = orig(ui, repo, source, **opts)
    return result
655 655
def override_rebase(orig, ui, repo, **opts):
    '''Run rebase with repo._isrebasing set so that largefile-aware code
    (e.g. commit()) knows to pull down remotely changed largefiles.

    Fix: propagate the wrapped command's return value instead of
    discarding it, consistent with the other wrappers in this module
    (override_transplant, hg_update, ...).  Backward-compatible: callers
    that ignored the previous None return are unaffected.
    '''
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        # always clear the flag, even if the rebase aborts
        repo._isrebasing = False
662 662
def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    '''Archive a revision, substituting real largefile contents for the
    hash-only standins.  Reimplements archival.archive rather than
    delegating to orig, so the signature must track the original's.
    '''
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # add a single member to the archive, honoring the match function
        # and decode filters
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # synthesize the .hg_archival.txt contents: repo/node/branch
            # plus tag information for the archived revision
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # archive the largefile under its real name, reading its
            # contents from the local store/cache
            # NOTE(review): findfile may return None if the largefile is
            # absent from every cache; open() in getdatafn would then
            # raise -- confirm cachelfiles above guarantees presence.
            path = lfutil.findfile(repo, getdata().strip())
            f = lfutil.splitstandin(f)

            def getdatafn():
                # closes over 'path'; safe because write() below consumes
                # it within the same loop iteration
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            sub.archive(repo.ui, archiver, prefix)

    archiver.done()
736 736
737 737 # If a largefile is modified, the change is not reflected in its
738 738 # standin until a commit. cmdutil.bailifchanged() raises an exception
739 739 # if the repo has uncommitted changes. Wrap it to also check if
740 740 # largefiles were changed. This is used by bisect and backout.
def override_bailifchanged(orig, repo):
    '''Wrap cmdutil.bailifchanged to also abort on uncommitted largefile
    changes (used by bisect and backout).

    Fix: reset repo.lfstatus in a finally block so an exception from
    status() cannot leave largefile-aware status permanently enabled
    (same pattern as override_summary).
    '''
    orig(repo)
    repo.lfstatus = True
    try:
        modified, added, removed, deleted = repo.status()[:4]
    finally:
        repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
748 748
749 749 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
# Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
def override_fetch(orig, ui, repo, *pats, **opts):
    '''Abort fetch when there are uncommitted changes, including changes
    to largefiles.

    Fix: reset repo.lfstatus in a finally block so an exception from
    status() cannot leave largefile-aware status permanently enabled
    (same pattern as override_summary).
    '''
    repo.lfstatus = True
    try:
        modified, added, removed, deleted = repo.status()[:4]
    finally:
        repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
757 757
def override_forget(orig, ui, repo, *pats, **opts):
    '''Forget with largefile awareness: let the original forget handle
    normal files, then forget the matched largefiles ourselves (drop them
    from the largefile dirstate and remove their standins).
    '''
    installnormalfilesmatchfn(repo[None].manifest())
    orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # candidates: modified + added + missing + clean, restricted to
    # files that actually have a standin in the working manifest
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
            unlink=True)
    finally:
        wlock.release()
797 797
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    '''Return the set of standin names touched by outgoing changesets,
    or None when the remote repository cannot be reached or there is
    nothing outgoing.  Used by override_outgoing and override_summary.
    '''
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    remoteui = hg.remoteui

    try:
        remote = hg.repository(remoteui(repo, opts), dest)
    except error.RepoError:
        return None
    o = lfutil.findoutgoing(repo, remote, False)
    if not o:
        return None
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # merge changeset: ctx.files() alone is not sufficient, so
            # compare the manifest against both parents to collect every
            # file that differs from either side
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        # keep only standins that still exist in this changeset
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return toupload
839 839
def override_outgoing(orig, ui, repo, dest=None, **opts):
    '''Run outgoing; with --large, also list the largefiles that the
    outgoing changesets would require uploading.

    Fix: the loop variable was named "file", shadowing the builtin;
    renamed to lfile (purely local, no behavior change).
    '''
    orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for lfile in toupload:
                ui.status(lfutil.splitstandin(lfile) + '\n')
            ui.status('\n')
852 852
def override_summary(orig, ui, repo, *pats, **opts):
    '''Run summary with largefile-aware status; with --large, also report
    how many largefiles would need uploading.'''
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

    if not opts.pop('large', None):
        return
    toupload = getoutgoinglfiles(ui, repo, None, **opts)
    if toupload is None:
        ui.status(_('largefiles: No remote repo\n'))
    else:
        ui.status(_('largefiles: %d to upload\n') % len(toupload))
866 866
def override_addremove(orig, ui, repo, *pats, **opts):
    '''addremove with largefile awareness: remove missing largefiles, add
    new files that qualify as largefiles, then let the original addremove
    handle only the normal files.

    Fixes: repo._isaddremove and the patched match function are now reset
    in finally blocks, so an abort inside remove_largefiles or the
    original addremove cannot leave the repo object or the module-level
    matcher in a patched state (same pattern as override_copy).
    '''
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    repo._isaddremove = True
    try:
        remove_largefiles(ui, repo, *missing, **opts)
    finally:
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    add_largefiles(ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, *pats, **opts)
    finally:
        restorematchfn()
    return result
891 891
892 892 # Calling purge with --all will cause the largefiles to be deleted.
893 893 # Override repo.status to prevent this from happening.
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def override_purge(orig, ui, repo, *dirs, **opts):
    '''Run purge with a status method that hides largefiles from the
    unknown/ignored lists, so purge --all does not delete them.

    Fix: restore the original repo.status in a finally block so an abort
    inside purge cannot leave the repo with a patched status method.
    '''
    oldstatus = repo.status
    def override_status(node1='.', node2=None, match=None, ignored=False,
            clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
            listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        # anything tracked by the largefile dirstate is not really
        # unknown/ignored -- keep purge's hands off it
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = override_status
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        repo.status = oldstatus
908 908
def override_rollback(orig, ui, repo, **opts):
    '''Rollback, then resync the largefile dirstate with the restored
    dirstate parent.

    This span contained both the pre-fix and post-fix bodies from the
    embedded changeset diff (issue3182); resolved here to the post-fix
    version, which holds the wlock while rewriting lfdirstate.  Also
    renames the loop variable "file", which shadowed the builtin.
    '''
    result = orig(ui, repo, **opts)
    # revert standins in the working copy to match the rolled-back parent
    merge.update(repo, node=None, branchmerge=False, force=True,
        partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for lfile in lfiles:
            if lfile in oldlfiles:
                lfdirstate.normallookup(lfile)
            else:
                # largefile did not exist in the rolled-back parent
                lfdirstate.add(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
923 927
def override_transplant(orig, ui, repo, *revs, **opts):
    # transplant may introduce new largefile revisions; refresh the
    # working copy afterwards
    res = orig(ui, repo, *revs, **opts)
    lfcommands.updatelfiles(repo.ui, repo)
    return res
@@ -1,450 +1,451 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import types
12 12 import os
13 13
14 14 from mercurial import context, error, manifest, match as match_, util
15 15 from mercurial import node as node_
16 16 from mercurial.i18n import _
17 17
18 18 import lfcommands
19 19 import proto
20 20 import lfutil
21 21
22 22 def reposetup(ui, repo):
23 23 # wire repositories should be given new wireproto functions but not the
24 24 # other largefiles modifications
25 25 if not repo.local():
26 26 return proto.wirereposetup(ui, repo)
27 27
28 28 for name in ('status', 'commitctx', 'commit', 'push'):
29 29 method = getattr(repo, name)
30 30 if (isinstance(method, types.FunctionType) and
31 31 method.func_name == 'wrap'):
32 32 ui.warn(_('largefiles: repo method %r appears to have already been'
33 33 ' wrapped by another extension: '
34 34 'largefiles may behave incorrectly\n')
35 35 % name)
36 36
37 37 class lfiles_repo(repo.__class__):
38 38 lfstatus = False
39 39 def status_nolfiles(self, *args, **kwargs):
40 40 return super(lfiles_repo, self).status(*args, **kwargs)
41 41
42 42 # When lfstatus is set, return a context that gives the names
43 43 # of largefiles instead of their corresponding standins and
44 44 # identifies the largefiles as always binary, regardless of
45 45 # their actual contents.
46 46 def __getitem__(self, changeid):
47 47 ctx = super(lfiles_repo, self).__getitem__(changeid)
48 48 if self.lfstatus:
49 49 class lfiles_manifestdict(manifest.manifestdict):
50 50 def __contains__(self, filename):
51 51 if super(lfiles_manifestdict,
52 52 self).__contains__(filename):
53 53 return True
54 54 return super(lfiles_manifestdict,
55 55 self).__contains__(lfutil.standin(filename))
56 56 class lfiles_ctx(ctx.__class__):
57 57 def files(self):
58 58 filenames = super(lfiles_ctx, self).files()
59 59 return [lfutil.splitstandin(f) or f for f in filenames]
60 60 def manifest(self):
61 61 man1 = super(lfiles_ctx, self).manifest()
62 62 man1.__class__ = lfiles_manifestdict
63 63 return man1
64 64 def filectx(self, path, fileid=None, filelog=None):
65 65 try:
66 66 result = super(lfiles_ctx, self).filectx(path,
67 67 fileid, filelog)
68 68 except error.LookupError:
69 69 # Adding a null character will cause Mercurial to
70 70 # identify this as a binary file.
71 71 result = super(lfiles_ctx, self).filectx(
72 72 lfutil.standin(path), fileid, filelog)
73 73 olddata = result.data
74 74 result.data = lambda: olddata() + '\0'
75 75 return result
76 76 ctx.__class__ = lfiles_ctx
77 77 return ctx
78 78
79 79 # Figure out the status of big files and insert them into the
80 80 # appropriate list in the result. Also removes standin files
81 81 # from the listing. Revert to the original status if
82 82 # self.lfstatus is False.
83 83 def status(self, node1='.', node2=None, match=None, ignored=False,
84 84 clean=False, unknown=False, listsubrepos=False):
85 85 listignored, listclean, listunknown = ignored, clean, unknown
86 86 if not self.lfstatus:
87 87 return super(lfiles_repo, self).status(node1, node2, match,
88 88 listignored, listclean, listunknown, listsubrepos)
89 89 else:
90 90 # some calls in this function rely on the old version of status
91 91 self.lfstatus = False
92 92 if isinstance(node1, context.changectx):
93 93 ctx1 = node1
94 94 else:
95 95 ctx1 = repo[node1]
96 96 if isinstance(node2, context.changectx):
97 97 ctx2 = node2
98 98 else:
99 99 ctx2 = repo[node2]
100 100 working = ctx2.rev() is None
101 101 parentworking = working and ctx1 == self['.']
102 102
103 103 def inctx(file, ctx):
104 104 try:
105 105 if ctx.rev() is None:
106 106 return file in ctx.manifest()
107 107 ctx[file]
108 108 return True
109 109 except KeyError:
110 110 return False
111 111
112 112 if match is None:
113 113 match = match_.always(self.root, self.getcwd())
114 114
115 115 # First check if there were files specified on the
116 116 # command line. If there were, and none of them were
117 117 # largefiles, we should just bail here and let super
118 118 # handle it -- thus gaining a big performance boost.
119 119 lfdirstate = lfutil.openlfdirstate(ui, self)
120 120 if match.files() and not match.anypats():
121 121 matchedfiles = [f for f in match.files() if f in lfdirstate]
122 122 if not matchedfiles:
123 123 return super(lfiles_repo, self).status(node1, node2,
124 124 match, listignored, listclean,
125 125 listunknown, listsubrepos)
126 126
127 127 # Create a copy of match that matches standins instead
128 128 # of largefiles.
129 129 def tostandin(file):
130 130 if inctx(lfutil.standin(file), ctx2):
131 131 return lfutil.standin(file)
132 132 return file
133 133
134 134 # Create a function that we can use to override what is
135 135 # normally the ignore matcher. We've already checked
136 136 # for ignored files on the first dirstate walk, and
137 137 # unnecessarily re-checking here causes a huge performance
138 138 # hit because lfdirstate only knows about largefiles
139 139 def _ignoreoverride(self):
140 140 return False
141 141
142 142 m = copy.copy(match)
143 143 m._files = [tostandin(f) for f in m._files]
144 144
145 145 # Get ignored files here even if we weren't asked for them; we
146 146 # must use the result here for filtering later
147 147 result = super(lfiles_repo, self).status(node1, node2, m,
148 148 True, clean, unknown, listsubrepos)
149 149 if working:
150 # hold the wlock while we read largefiles and
151 # update the lfdirstate
152 wlock = repo.wlock()
153 150 try:
154 151 # Any non-largefiles that were explicitly listed must be
155 152 # taken out or lfdirstate.status will report an error.
156 153 # The status of these files was already computed using
157 154 # super's status.
158 155 # Override lfdirstate's ignore matcher to not do
159 156 # anything
160 157 orig_ignore = lfdirstate._ignore
161 158 lfdirstate._ignore = _ignoreoverride
162 159
163 160 match._files = [f for f in match._files if f in
164 161 lfdirstate]
165 162 # Don't waste time getting the ignored and unknown
166 163 # files again; we already have them
167 164 s = lfdirstate.status(match, [], False,
168 165 listclean, False)
169 166 (unsure, modified, added, removed, missing, unknown,
170 167 ignored, clean) = s
171 168 # Replace the list of ignored and unknown files with
172 169 # the previously calculated lists, and strip out the
173 170 # largefiles
174 171 lfiles = set(lfdirstate._map)
175 172 ignored = set(result[5]).difference(lfiles)
176 173 unknown = set(result[4]).difference(lfiles)
177 174 if parentworking:
178 175 for lfile in unsure:
179 176 standin = lfutil.standin(lfile)
180 177 if standin not in ctx1:
181 178 # from second parent
182 179 modified.append(lfile)
183 180 elif ctx1[standin].data().strip() \
184 181 != lfutil.hashfile(self.wjoin(lfile)):
185 182 modified.append(lfile)
186 183 else:
187 184 clean.append(lfile)
188 185 lfdirstate.normal(lfile)
189 lfdirstate.write()
190 186 else:
191 187 tocheck = unsure + modified + added + clean
192 188 modified, added, clean = [], [], []
193 189
194 190 for lfile in tocheck:
195 191 standin = lfutil.standin(lfile)
196 192 if inctx(standin, ctx1):
197 193 if ctx1[standin].data().strip() != \
198 194 lfutil.hashfile(self.wjoin(lfile)):
199 195 modified.append(lfile)
200 196 else:
201 197 clean.append(lfile)
202 198 else:
203 199 added.append(lfile)
200 finally:
204 201 # Replace the original ignore function
205 202 lfdirstate._ignore = orig_ignore
206 finally:
207 wlock.release()
208 203
209 204 for standin in ctx1.manifest():
210 205 if not lfutil.isstandin(standin):
211 206 continue
212 207 lfile = lfutil.splitstandin(standin)
213 208 if not match(lfile):
214 209 continue
215 210 if lfile not in lfdirstate:
216 211 removed.append(lfile)
217 212
218 213 # Filter result lists
219 214 result = list(result)
220 215
221 216 # Largefiles are not really removed when they're
222 217 # still in the normal dirstate. Likewise, normal
223 218 # files are not really removed if it's still in
224 219 # lfdirstate. This happens in merges where files
225 220 # change type.
226 221 removed = [f for f in removed if f not in repo.dirstate]
227 222 result[2] = [f for f in result[2] if f not in lfdirstate]
228 223
229 224 # Unknown files
230 225 unknown = set(unknown).difference(ignored)
231 226 result[4] = [f for f in unknown
232 227 if (repo.dirstate[f] == '?' and
233 228 not lfutil.isstandin(f))]
234 229 # Ignored files were calculated earlier by the dirstate,
235 230 # and we already stripped out the largefiles from the list
236 231 result[5] = ignored
237 232 # combine normal files and largefiles
238 233 normals = [[fn for fn in filelist
239 234 if not lfutil.isstandin(fn)]
240 235 for filelist in result]
241 236 lfiles = (modified, added, removed, missing, [], [], clean)
242 237 result = [sorted(list1 + list2)
243 238 for (list1, list2) in zip(normals, lfiles)]
244 239 else:
245 240 def toname(f):
246 241 if lfutil.isstandin(f):
247 242 return lfutil.splitstandin(f)
248 243 return f
249 244 result = [[toname(f) for f in items] for items in result]
250 245
251 246 if not listunknown:
252 247 result[4] = []
253 248 if not listignored:
254 249 result[5] = []
255 250 if not listclean:
256 251 result[6] = []
257 252 self.lfstatus = True
258 253 return result
259 254
260 255 # As part of committing, copy all of the largefiles into the
261 256 # cache.
262 257 def commitctx(self, *args, **kwargs):
263 258 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
264 259 ctx = self[node]
265 260 for filename in ctx.files():
266 261 if lfutil.isstandin(filename) and filename in ctx.manifest():
267 262 realfile = lfutil.splitstandin(filename)
268 263 lfutil.copytostore(self, ctx.node(), realfile)
269 264
270 265 return node
271 266
272 267 # Before commit, largefile standins have not had their
273 268 # contents updated to reflect the hash of their largefile.
274 269 # Do that here.
275 270 def commit(self, text="", user=None, date=None, match=None,
276 271 force=False, editor=False, extra={}):
277 272 orig = super(lfiles_repo, self).commit
278 273
279 274 wlock = repo.wlock()
280 275 try:
281 276 # Case 0: Rebase
282 277 # We have to take the time to pull down the new largefiles now.
283 278 # Otherwise if we are rebasing, any largefiles that were
284 279 # modified in the destination changesets get overwritten, either
285 280 # by the rebase or in the first commit after the rebase.
286 281 # updatelfiles will update the dirstate to mark any pulled
287 282 # largefiles as modified
288 283 if getattr(repo, "_isrebasing", False):
289 284 lfcommands.updatelfiles(repo.ui, repo)
290 285 result = orig(text=text, user=user, date=date, match=match,
291 286 force=force, editor=editor, extra=extra)
292 287 return result
293 288 # Case 1: user calls commit with no specific files or
294 289 # include/exclude patterns: refresh and commit all files that
295 290 # are "dirty".
296 291 if ((match is None) or
297 292 (not match.anypats() and not match.files())):
298 293 # Spend a bit of time here to get a list of files we know
299 294 # are modified so we can compare only against those.
300 295 # It can cost a lot of time (several seconds)
301 296 # otherwise to update all standins if the largefiles are
302 297 # large.
303 298 lfdirstate = lfutil.openlfdirstate(ui, self)
304 299 dirtymatch = match_.always(repo.root, repo.getcwd())
305 300 s = lfdirstate.status(dirtymatch, [], False, False, False)
306 301 modifiedfiles = []
307 302 for i in s:
308 303 modifiedfiles.extend(i)
309 304 lfiles = lfutil.listlfiles(self)
310 305 # this only loops through largefiles that exist (not
311 306 # removed/renamed)
312 307 for lfile in lfiles:
313 308 if lfile in modifiedfiles:
314 309 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
315 310 # this handles the case where a rebase is being
316 311 # performed and the working copy is not updated
317 312 # yet.
318 313 if os.path.exists(self.wjoin(lfile)):
319 314 lfutil.updatestandin(self,
320 315 lfutil.standin(lfile))
321 316 lfdirstate.normal(lfile)
322 317 for lfile in lfdirstate:
323 318 if lfile in modifiedfiles:
324 319 if not os.path.exists(
325 320 repo.wjoin(lfutil.standin(lfile))):
326 321 lfdirstate.drop(lfile)
322
323 result = orig(text=text, user=user, date=date, match=match,
324 force=force, editor=editor, extra=extra)
325 # This needs to be after commit; otherwise precommit hooks
326 # get the wrong status
327 327 lfdirstate.write()
328
329 return orig(text=text, user=user, date=date, match=match,
330 force=force, editor=editor, extra=extra)
328 return result
331 329
332 330 for f in match.files():
333 331 if lfutil.isstandin(f):
334 332 raise util.Abort(
335 333 _('file "%s" is a largefile standin') % f,
336 334 hint=('commit the largefile itself instead'))
337 335
338 336 # Case 2: user calls commit with specified patterns: refresh
339 337 # any matching big files.
340 338 smatcher = lfutil.composestandinmatcher(self, match)
341 339 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
342 340
343 341 # No matching big files: get out of the way and pass control to
344 342 # the usual commit() method.
345 343 if not standins:
346 344 return orig(text=text, user=user, date=date, match=match,
347 345 force=force, editor=editor, extra=extra)
348 346
349 347 # Refresh all matching big files. It's possible that the
350 348 # commit will end up failing, in which case the big files will
351 349 # stay refreshed. No harm done: the user modified them and
352 350 # asked to commit them, so sooner or later we're going to
353 351 # refresh the standins. Might as well leave them refreshed.
354 352 lfdirstate = lfutil.openlfdirstate(ui, self)
355 353 for standin in standins:
356 354 lfile = lfutil.splitstandin(standin)
357 355 if lfdirstate[lfile] <> 'r':
358 356 lfutil.updatestandin(self, standin)
359 357 lfdirstate.normal(lfile)
360 358 else:
361 359 lfdirstate.drop(lfile)
362 lfdirstate.write()
363 360
364 361 # Cook up a new matcher that only matches regular files or
365 362 # standins corresponding to the big files requested by the
366 363 # user. Have to modify _files to prevent commit() from
367 364 # complaining "not tracked" for big files.
368 365 lfiles = lfutil.listlfiles(repo)
369 366 match = copy.copy(match)
370 367 orig_matchfn = match.matchfn
371 368
372 369 # Check both the list of largefiles and the list of
373 370 # standins because if a largefile was removed, it
374 371 # won't be in the list of largefiles at this point
375 372 match._files += sorted(standins)
376 373
377 374 actualfiles = []
378 375 for f in match._files:
379 376 fstandin = lfutil.standin(f)
380 377
381 378 # ignore known largefiles and standins
382 379 if f in lfiles or fstandin in standins:
383 380 continue
384 381
385 382 # append directory separator to avoid collisions
386 383 if not fstandin.endswith(os.sep):
387 384 fstandin += os.sep
388 385
389 386 # prevalidate matching standin directories
390 387 if util.any(st for st in match._files
391 388 if st.startswith(fstandin)):
392 389 continue
393 390 actualfiles.append(f)
394 391 match._files = actualfiles
395 392
396 393 def matchfn(f):
397 394 if orig_matchfn(f):
398 395 return f not in lfiles
399 396 else:
400 397 return f in standins
401 398
402 399 match.matchfn = matchfn
403 return orig(text=text, user=user, date=date, match=match,
400 result = orig(text=text, user=user, date=date, match=match,
404 401 force=force, editor=editor, extra=extra)
402 # This needs to be after commit; otherwise precommit hooks
403 # get the wrong status
404 lfdirstate.write()
405 return result
405 406 finally:
406 407 wlock.release()
407 408
408 409 def push(self, remote, force=False, revs=None, newbranch=False):
409 410 o = lfutil.findoutgoing(repo, remote, force)
410 411 if o:
411 412 toupload = set()
412 413 o = repo.changelog.nodesbetween(o, revs)[0]
413 414 for n in o:
414 415 parents = [p for p in repo.changelog.parents(n)
415 416 if p != node_.nullid]
416 417 ctx = repo[n]
417 418 files = set(ctx.files())
418 419 if len(parents) == 2:
419 420 mc = ctx.manifest()
420 421 mp1 = ctx.parents()[0].manifest()
421 422 mp2 = ctx.parents()[1].manifest()
422 423 for f in mp1:
423 424 if f not in mc:
424 425 files.add(f)
425 426 for f in mp2:
426 427 if f not in mc:
427 428 files.add(f)
428 429 for f in mc:
429 430 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
430 431 None):
431 432 files.add(f)
432 433
433 434 toupload = toupload.union(
434 435 set([ctx[f].data().strip()
435 436 for f in files
436 437 if lfutil.isstandin(f) and f in ctx]))
437 438 lfcommands.uploadlfiles(ui, self, remote, toupload)
438 439 return super(lfiles_repo, self).push(remote, force, revs,
439 440 newbranch)
440 441
441 442 repo.__class__ = lfiles_repo
442 443
443 444 def checkrequireslfiles(ui, repo, **kwargs):
444 445 if 'largefiles' not in repo.requirements and util.any(
445 446 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
446 447 repo.requirements.add('largefiles')
447 448 repo._writerequirements()
448 449
449 450 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
450 451 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,68 +1,67 b''
1 1 Test how largefiles abort in case the disk runs full
2 2
3 3 $ cat > criple.py <<EOF
4 4 > import os, errno, shutil
5 5 > from mercurial import util
6 6 > #
7 7 > # this makes the original largefiles code abort:
8 8 > def copyfileobj(fsrc, fdst, length=16*1024):
9 9 > fdst.write(fsrc.read(4))
10 10 > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
11 11 > shutil.copyfileobj = copyfileobj
12 12 > #
13 13 > # this makes the rewritten code abort:
14 14 > def filechunkiter(f, size=65536, limit=None):
15 15 > yield f.read(4)
16 16 > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
17 17 > util.filechunkiter = filechunkiter
18 18 > #
19 19 > def oslink(src, dest):
20 20 > raise OSError("no hardlinks, try copying instead")
21 21 > util.oslink = oslink
22 22 > EOF
23 23
24 24 $ echo "[extensions]" >> $HGRCPATH
25 25 $ echo "largefiles =" >> $HGRCPATH
26 26
27 27 $ hg init alice
28 28 $ cd alice
29 29 $ echo "this is a very big file" > big
30 30 $ hg add --large big
31 31 $ hg commit --config extensions.criple=$TESTTMP/criple.py -m big
32 32 abort: No space left on device
33 33 [255]
34 34
35 35 The largefile is not created in .hg/largefiles:
36 36
37 37 $ ls .hg/largefiles
38 38 dirstate
39 39
40 40 The user cache is not even created:
41 41
42 42 >>> import os; os.path.exists("$HOME/.cache/largefiles/")
43 43 False
44 44
45 45 Make the commit with space on the device:
46 46
47 47 $ hg commit -m big
48 48
49 49 Now make a clone with a full disk, and make sure lfutil.link function
50 50 makes copies instead of hardlinks:
51 51
52 52 $ cd ..
53 53 $ hg --config extensions.criple=$TESTTMP/criple.py clone --pull alice bob
54 54 requesting all changes
55 55 adding changesets
56 56 adding manifests
57 57 adding file changes
58 58 added 1 changesets with 1 changes to 1 files
59 59 updating to branch default
60 60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 61 getting changed largefiles
62 62 abort: No space left on device
63 63 [255]
64 64
65 65 The largefile is not created in .hg/largefiles:
66 66
67 67 $ ls bob/.hg/largefiles
68 dirstate
General Comments 0
You need to be logged in to leave comments. Login now