largefiles: move calculation of largefiles for updating to utility function
Na'Tosha Bard
r16245:a18ad914 default
@@ -1,459 +1,467 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19
20 20 shortname = '.hglf'
21 21 longname = 'largefiles'
22 22
23 23
24 24 # -- Portability wrappers ----------------------------------------------
25 25
26 26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27 27 return dirstate.walk(matcher, [], unknown, ignored)
28 28
29 29 def repo_add(repo, list):
30 30 add = repo[None].add
31 31 return add(list)
32 32
33 33 def repo_remove(repo, list, unlink=False):
34 34 def remove(list, unlink):
35 35 wlock = repo.wlock()
36 36 try:
37 37 if unlink:
38 38 for f in list:
39 39 try:
40 40 util.unlinkpath(repo.wjoin(f))
41 41 except OSError, inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44 repo[None].forget(list)
45 45 finally:
46 46 wlock.release()
47 47 return remove(list, unlink=unlink)
48 48
49 49 def repo_forget(repo, list):
50 50 forget = repo[None].forget
51 51 return forget(list)
52 52
53 53 def findoutgoing(repo, remote, force):
54 54 from mercurial import discovery
55 55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
56 56 remote, force=force)
57 57 return repo.changelog.findmissing(common)
58 58
59 59 # -- Private worker functions ------------------------------------------
60 60
61 61 def getminsize(ui, assumelfiles, opt, default=10):
62 62 lfsize = opt
63 63 if not lfsize and assumelfiles:
64 64 lfsize = ui.config(longname, 'minsize', default=default)
65 65 if lfsize:
66 66 try:
67 67 lfsize = float(lfsize)
68 68 except ValueError:
69 69 raise util.Abort(_('largefiles: size must be number (not %s)\n')
70 70 % lfsize)
71 71 if lfsize is None:
72 72 raise util.Abort(_('minimum size for largefiles must be specified'))
73 73 return lfsize
74 74
75 75 def link(src, dest):
76 76 try:
77 77 util.oslink(src, dest)
78 78 except OSError:
79 79 # if hardlinks fail, fallback on atomic copy
80 80 dst = util.atomictempfile(dest)
81 81 for chunk in util.filechunkiter(open(src, 'rb')):
82 82 dst.write(chunk)
83 83 dst.close()
84 84 os.chmod(dest, os.stat(src).st_mode)
85 85
86 86 def usercachepath(ui, hash):
87 87 path = ui.configpath(longname, 'usercache', None)
88 88 if path:
89 89 path = os.path.join(path, hash)
90 90 else:
91 91 if os.name == 'nt':
92 92 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
93 93 if appdata:
94 94 path = os.path.join(appdata, longname, hash)
95 95 elif platform.system() == 'Darwin':
96 96 home = os.getenv('HOME')
97 97 if home:
98 98 path = os.path.join(home, 'Library', 'Caches',
99 99 longname, hash)
100 100 elif os.name == 'posix':
101 101 path = os.getenv('XDG_CACHE_HOME')
102 102 if path:
103 103 path = os.path.join(path, longname, hash)
104 104 else:
105 105 home = os.getenv('HOME')
106 106 if home:
107 107 path = os.path.join(home, '.cache', longname, hash)
108 108 else:
109 109 raise util.Abort(_('unknown operating system: %s\n') % os.name)
110 110 return path
111 111
112 112 def inusercache(ui, hash):
113 113 path = usercachepath(ui, hash)
114 114 return path and os.path.exists(path)
115 115
116 116 def findfile(repo, hash):
117 117 if instore(repo, hash):
118 118 repo.ui.note(_('Found %s in store\n') % hash)
119 119 return storepath(repo, hash)
120 120 elif inusercache(repo.ui, hash):
121 121 repo.ui.note(_('Found %s in system cache\n') % hash)
122 122 path = storepath(repo, hash)
123 123 util.makedirs(os.path.dirname(path))
124 124 link(usercachepath(repo.ui, hash), path)
125 125 return path
126 126 return None
127 127
128 128 class largefiles_dirstate(dirstate.dirstate):
129 129 def __getitem__(self, key):
130 130 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
131 131 def normal(self, f):
132 132 return super(largefiles_dirstate, self).normal(unixpath(f))
133 133 def remove(self, f):
134 134 return super(largefiles_dirstate, self).remove(unixpath(f))
135 135 def add(self, f):
136 136 return super(largefiles_dirstate, self).add(unixpath(f))
137 137 def drop(self, f):
138 138 return super(largefiles_dirstate, self).drop(unixpath(f))
139 139 def forget(self, f):
140 140 return super(largefiles_dirstate, self).forget(unixpath(f))
141 141 def normallookup(self, f):
142 142 return super(largefiles_dirstate, self).normallookup(unixpath(f))
143 143
144 144 def openlfdirstate(ui, repo):
145 145 '''
146 146 Return a dirstate object that tracks largefiles: i.e. its root is
147 147 the repo root, but it is saved in .hg/largefiles/dirstate.
148 148 '''
149 149 admin = repo.join(longname)
150 150 opener = scmutil.opener(admin)
151 151 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
152 152 repo.dirstate._validate)
153 153
154 154 # If the largefiles dirstate does not exist, populate and create
155 155 # it. This ensures that we create it on the first meaningful
156 156 # largefiles operation in a new clone.
157 157 if not os.path.exists(os.path.join(admin, 'dirstate')):
158 158 util.makedirs(admin)
159 159 matcher = getstandinmatcher(repo)
160 160 for standin in dirstate_walk(repo.dirstate, matcher):
161 161 lfile = splitstandin(standin)
162 162 hash = readstandin(repo, lfile)
163 163 lfdirstate.normallookup(lfile)
164 164 try:
165 165 if hash == hashfile(repo.wjoin(lfile)):
166 166 lfdirstate.normal(lfile)
167 167 except OSError, err:
168 168 if err.errno != errno.ENOENT:
169 169 raise
170 170 return lfdirstate
171 171
172 172 def lfdirstate_status(lfdirstate, repo, rev):
173 173 match = match_.always(repo.root, repo.getcwd())
174 174 s = lfdirstate.status(match, [], False, False, False)
175 175 unsure, modified, added, removed, missing, unknown, ignored, clean = s
176 176 for lfile in unsure:
177 177 if repo[rev][standin(lfile)].data().strip() != \
178 178 hashfile(repo.wjoin(lfile)):
179 179 modified.append(lfile)
180 180 else:
181 181 clean.append(lfile)
182 182 lfdirstate.normal(lfile)
183 183 return (modified, added, removed, missing, unknown, ignored, clean)
184 184
185 185 def listlfiles(repo, rev=None, matcher=None):
186 186 '''return a list of largefiles in the working copy or the
187 187 specified changeset'''
188 188
189 189 if matcher is None:
190 190 matcher = getstandinmatcher(repo)
191 191
192 192 # ignore unknown files in working directory
193 193 return [splitstandin(f)
194 194 for f in repo[rev].walk(matcher)
195 195 if rev is not None or repo.dirstate[f] != '?']
196 196
197 197 def instore(repo, hash):
198 198 return os.path.exists(storepath(repo, hash))
199 199
200 200 def storepath(repo, hash):
201 201 return repo.join(os.path.join(longname, hash))
202 202
203 203 def copyfromcache(repo, hash, filename):
204 204 '''Copy the specified largefile from the repo or system cache to
205 205 filename in the repository. Return true on success or false if the
206 206 file was not found in either cache (which should not happen:
207 207 this is meant to be called only after ensuring that the needed
208 208 largefile exists in the cache).'''
209 209 path = findfile(repo, hash)
210 210 if path is None:
211 211 return False
212 212 util.makedirs(os.path.dirname(repo.wjoin(filename)))
213 213 # The write may fail before the file is fully written, but we
214 214 # don't use atomic writes in the working copy.
215 215 shutil.copy(path, repo.wjoin(filename))
216 216 return True
217 217
218 218 def copytostore(repo, rev, file, uploaded=False):
219 219 hash = readstandin(repo, file)
220 220 if instore(repo, hash):
221 221 return
222 222 copytostoreabsolute(repo, repo.wjoin(file), hash)
223 223
224 224 def copyalltostore(repo, node):
225 225 '''Copy all largefiles in a given revision to the store'''
226 226
227 227 ctx = repo[node]
228 228 for filename in ctx.files():
229 229 if isstandin(filename) and filename in ctx.manifest():
230 230 realfile = splitstandin(filename)
231 231 copytostore(repo, ctx.node(), realfile)
232 232
233 233
234 234 def copytostoreabsolute(repo, file, hash):
235 235 util.makedirs(os.path.dirname(storepath(repo, hash)))
236 236 if inusercache(repo.ui, hash):
237 237 link(usercachepath(repo.ui, hash), storepath(repo, hash))
238 238 else:
239 239 dst = util.atomictempfile(storepath(repo, hash),
240 240 createmode=repo.store.createmode)
241 241 for chunk in util.filechunkiter(open(file, 'rb')):
242 242 dst.write(chunk)
243 243 dst.close()
244 244 linktousercache(repo, hash)
245 245
246 246 def linktousercache(repo, hash):
247 247 path = usercachepath(repo.ui, hash)
248 248 if path:
249 249 util.makedirs(os.path.dirname(path))
250 250 link(storepath(repo, hash), path)
251 251
252 252 def getstandinmatcher(repo, pats=[], opts={}):
253 253 '''Return a match object that applies pats to the standin directory'''
254 254 standindir = repo.pathto(shortname)
255 255 if pats:
256 256 # patterns supplied: search standin directory relative to current dir
257 257 cwd = repo.getcwd()
258 258 if os.path.isabs(cwd):
259 259 # cwd is an absolute path for hg -R <reponame>
260 260 # work relative to the repository root in this case
261 261 cwd = ''
262 262 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
263 263 elif os.path.isdir(standindir):
264 264 # no patterns: relative to repo root
265 265 pats = [standindir]
266 266 else:
267 267 # no patterns and no standin dir: return matcher that matches nothing
268 268 match = match_.match(repo.root, None, [], exact=True)
269 269 match.matchfn = lambda f: False
270 270 return match
271 271 return getmatcher(repo, pats, opts, showbad=False)
272 272
273 273 def getmatcher(repo, pats=[], opts={}, showbad=True):
274 274 '''Wrapper around scmutil.match() that adds showbad: if false,
275 275 neuter the match object's bad() method so it does not print any
276 276 warnings about missing files or directories.'''
277 277 match = scmutil.match(repo[None], pats, opts)
278 278
279 279 if not showbad:
280 280 match.bad = lambda f, msg: None
281 281 return match
282 282
283 283 def composestandinmatcher(repo, rmatcher):
284 284 '''Return a matcher that accepts standins corresponding to the
285 285 files accepted by rmatcher. Pass the list of files in the matcher
286 286 as the paths specified by the user.'''
287 287 smatcher = getstandinmatcher(repo, rmatcher.files())
288 288 isstandin = smatcher.matchfn
289 289 def composed_matchfn(f):
290 290 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
291 291 smatcher.matchfn = composed_matchfn
292 292
293 293 return smatcher
294 294
295 295 def standin(filename):
296 296 '''Return the repo-relative path to the standin for the specified big
297 297 file.'''
298 298 # Notes:
299 299 # 1) Most callers want an absolute path, but _create_standin() needs
300 300 # it repo-relative so lfadd() can pass it to repo_add(). So leave
301 301 # it up to the caller to use repo.wjoin() to get an absolute path.
302 302 # 2) Join with '/' because that's what dirstate always uses, even on
303 303 # Windows. Change existing separator to '/' first in case we are
304 304 # passed filenames from an external source (like the command line).
305 305 return shortname + '/' + util.pconvert(filename)
306 306
307 307 def isstandin(filename):
308 308 '''Return true if filename is a big file standin. filename must be
309 309 in Mercurial's internal form (slash-separated).'''
310 310 return filename.startswith(shortname + '/')
311 311
312 312 def splitstandin(filename):
313 313 # Split on / because that's what dirstate always uses, even on Windows.
314 314 # Change local separator to / first just in case we are passed filenames
315 315 # from an external source (like the command line).
316 316 bits = util.pconvert(filename).split('/', 1)
317 317 if len(bits) == 2 and bits[0] == shortname:
318 318 return bits[1]
319 319 else:
320 320 return None
321 321
322 322 def updatestandin(repo, standin):
323 323 file = repo.wjoin(splitstandin(standin))
324 324 if os.path.exists(file):
325 325 hash = hashfile(file)
326 326 executable = getexecutable(file)
327 327 writestandin(repo, standin, hash, executable)
328 328
329 329 def readstandin(repo, filename, node=None):
330 330 '''read hex hash from standin for filename at given node, or working
331 331 directory if no node is given'''
332 332 return repo[node][standin(filename)].data().strip()
333 333
334 334 def writestandin(repo, standin, hash, executable):
335 335 '''write hash to <repo.root>/<standin>'''
336 336 writehash(hash, repo.wjoin(standin), executable)
337 337
338 338 def copyandhash(instream, outfile):
339 339 '''Read bytes from instream (iterable) and write them to outfile,
340 340 computing the SHA-1 hash of the data along the way. Close outfile
341 341 when done and return the binary hash.'''
342 342 hasher = util.sha1('')
343 343 for data in instream:
344 344 hasher.update(data)
345 345 outfile.write(data)
346 346
347 347 # Blecch: closing a file that somebody else opened is rude and
348 348 # wrong. But it's so darn convenient and practical! After all,
349 349 # outfile was opened just to copy and hash.
350 350 outfile.close()
351 351
352 352 return hasher.digest()
353 353
354 354 def hashrepofile(repo, file):
355 355 return hashfile(repo.wjoin(file))
356 356
357 357 def hashfile(file):
358 358 if not os.path.exists(file):
359 359 return ''
360 360 hasher = util.sha1('')
361 361 fd = open(file, 'rb')
362 362 for data in blockstream(fd):
363 363 hasher.update(data)
364 364 fd.close()
365 365 return hasher.hexdigest()
366 366
367 367 class limitreader(object):
368 368 def __init__(self, f, limit):
369 369 self.f = f
370 370 self.limit = limit
371 371
372 372 def read(self, length):
373 373 if self.limit == 0:
374 374 return ''
375 375 length = length > self.limit and self.limit or length
376 376 self.limit -= length
377 377 return self.f.read(length)
378 378
379 379 def close(self):
380 380 pass
381 381
382 382 def blockstream(infile, blocksize=128 * 1024):
383 383 """Generator that yields blocks of data from infile and closes infile."""
384 384 while True:
385 385 data = infile.read(blocksize)
386 386 if not data:
387 387 break
388 388 yield data
389 389 # same blecch as copyandhash() above
390 390 infile.close()
391 391
392 392 def writehash(hash, filename, executable):
393 393 util.makedirs(os.path.dirname(filename))
394 394 util.writefile(filename, hash + '\n')
395 395 os.chmod(filename, getmode(executable))
396 396
397 397 def getexecutable(filename):
398 398 mode = os.stat(filename).st_mode
399 399 return ((mode & stat.S_IXUSR) and
400 400 (mode & stat.S_IXGRP) and
401 401 (mode & stat.S_IXOTH))
402 402
403 403 def getmode(executable):
404 404 if executable:
405 405 return 0755
406 406 else:
407 407 return 0644
408 408
409 409 def urljoin(first, second, *arg):
410 410 def join(left, right):
411 411 if not left.endswith('/'):
412 412 left += '/'
413 413 if right.startswith('/'):
414 414 right = right[1:]
415 415 return left + right
416 416
417 417 url = join(first, second)
418 418 for a in arg:
419 419 url = join(url, a)
420 420 return url
421 421
422 422 def hexsha1(data):
423 423 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
424 424 object data"""
425 425 h = util.sha1()
426 426 for chunk in util.filechunkiter(data):
427 427 h.update(chunk)
428 428 return h.hexdigest()
429 429
430 430 def httpsendfile(ui, filename):
431 431 return httpconnection.httpsendfile(ui, filename, 'rb')
432 432
433 433 def unixpath(path):
434 434 '''Return a version of path normalized for use with the lfdirstate.'''
435 435 return util.pconvert(os.path.normpath(path))
436 436
437 437 def islfilesrepo(repo):
438 438 return ('largefiles' in repo.requirements and
439 439 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
440 440
441 441 class storeprotonotcapable(Exception):
442 442 def __init__(self, storetypes):
443 443 self.storetypes = storetypes
444 444
445 445 def getcurrentheads(repo):
446 446 branches = repo.branchmap()
447 447 heads = []
448 448 for branch in branches:
449 449 newheads = repo.branchheads(branch)
450 450 heads = heads + newheads
451 451 return heads
452 452
453 453 def getstandinsstate(repo):
454 454 standins = []
455 455 matcher = getstandinmatcher(repo)
456 456 for standin in dirstate_walk(repo.dirstate, matcher):
457 457 lfile = splitstandin(standin)
458 458 standins.append((lfile, readstandin(repo, lfile)))
459 459 return standins
460
461 def getlfilestoupdate(oldstandins, newstandins):
462 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
463 filelist = []
464 for f in changedstandins:
465 if f[0] not in filelist:
466 filelist.append(f[0])
467 return filelist
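
For illustration, a minimal sketch of what the new getlfilestoupdate() helper computes, using hypothetical (filename, hash) tuples of the kind getstandinsstate() returns; the file names and hash placeholders below are invented for the example, and the import path merely assumes the extension's normal module layout.

    from hgext.largefiles import lfutil

    # Hypothetical standin snapshots taken before and after an update:
    # each entry is a (largefile name, standin hash) pair.
    oldstandins = [('big.bin', 'a' * 40), ('data.iso', 'b' * 40)]
    newstandins = [('big.bin', 'a' * 40), ('data.iso', 'c' * 40)]

    # Only names whose (name, hash) pair differs between the two snapshots
    # are returned, so unchanged largefiles are skipped by the update step.
    print lfutil.getlfilestoupdate(oldstandins, newstandins)  # ['data.iso']
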
@@ -1,973 +1,965 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 27 largefiles'''
28 28 oldmatch = None # for the closure
29 29 def override_match(ctx, pats=[], opts={}, globbed=False,
30 30 default='relpath'):
31 31 match = oldmatch(ctx, pats, opts, globbed, default)
32 32 m = copy.copy(match)
33 33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 34 manifest)
35 35 m._files = filter(notlfile, m._files)
36 36 m._fmap = set(m._files)
37 37 orig_matchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(override_match)
41 41
42 42 def installmatchfn(f):
43 43 oldmatch = scmutil.match
44 44 setattr(f, 'oldmatch', oldmatch)
45 45 scmutil.match = f
46 46 return oldmatch
47 47
48 48 def restorematchfn():
49 49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 50 was called. no-op if scmutil.match is its original function.
51 51
52 52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 53 restore matchfn to reverse'''
54 54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55 55
56 56 def add_largefiles(ui, repo, *pats, **opts):
57 57 large = opts.pop('large', None)
58 58 lfsize = lfutil.getminsize(
59 59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60 60
61 61 lfmatcher = None
62 62 if lfutil.islfilesrepo(repo):
63 63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 64 if lfpats:
65 65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66 66
67 67 lfnames = []
68 68 m = scmutil.match(repo[None], pats, opts)
69 69 m.bad = lambda x, y: None
70 70 wctx = repo[None]
71 71 for f in repo.walk(m):
72 72 exact = m.exact(f)
73 73 lfile = lfutil.standin(f) in wctx
74 74 nfile = f in wctx
75 75 exists = lfile or nfile
76 76
77 77 # Don't warn the user when they attempt to add a normal tracked file.
78 78 # The normal add code will do that for us.
79 79 if exact and exists:
80 80 if lfile:
81 81 ui.warn(_('%s already a largefile\n') % f)
82 82 continue
83 83
84 84 if exact or not exists:
85 85 abovemin = (lfsize and
86 86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 88 lfnames.append(f)
89 89 if ui.verbose or not exact:
90 90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91 91
92 92 bad = []
93 93 standins = []
94 94
95 95 # Need to lock, otherwise there could be a race condition between
96 96 # when standins are created and added to the repo.
97 97 wlock = repo.wlock()
98 98 try:
99 99 if not opts.get('dry_run'):
100 100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 101 for f in lfnames:
102 102 standinname = lfutil.standin(f)
103 103 lfutil.writestandin(repo, standinname, hash='',
104 104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 105 standins.append(standinname)
106 106 if lfdirstate[f] == 'r':
107 107 lfdirstate.normallookup(f)
108 108 else:
109 109 lfdirstate.add(f)
110 110 lfdirstate.write()
111 111 bad += [lfutil.splitstandin(f)
112 112 for f in lfutil.repo_add(repo, standins)
113 113 if f in m.files()]
114 114 finally:
115 115 wlock.release()
116 116 return bad
117 117
118 118 def remove_largefiles(ui, repo, *pats, **opts):
119 119 after = opts.get('after')
120 120 if not pats and not after:
121 121 raise util.Abort(_('no files specified'))
122 122 m = scmutil.match(repo[None], pats, opts)
123 123 try:
124 124 repo.lfstatus = True
125 125 s = repo.status(match=m, clean=True)
126 126 finally:
127 127 repo.lfstatus = False
128 128 manifest = repo[None].manifest()
129 129 modified, added, deleted, clean = [[f for f in list
130 130 if lfutil.standin(f) in manifest]
131 131 for list in [s[0], s[1], s[3], s[6]]]
132 132
133 133 def warn(files, reason):
134 134 for f in files:
135 135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 136 % (m.rel(f), reason))
137 137
138 138 if after:
139 139 remove, forget = deleted, []
140 140 warn(modified + added + clean, _('file still exists'))
141 141 else:
142 142 remove, forget = deleted + clean, []
143 143 warn(modified, _('file is modified'))
144 144 warn(added, _('file has been marked for add'))
145 145
146 146 for f in sorted(remove + forget):
147 147 if ui.verbose or not m.exact(f):
148 148 ui.status(_('removing %s\n') % m.rel(f))
149 149
150 150 # Need to lock because standin files are deleted then removed from the
151 151 # repository and we could race in between.
152 152 wlock = repo.wlock()
153 153 try:
154 154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 155 for f in remove:
156 156 if not after:
157 157 # If this is being called by addremove, notify the user that we
158 158 # are removing the file.
159 159 if getattr(repo, "_isaddremove", False):
160 160 ui.status(_('removing %s\n') % f)
161 161 if os.path.exists(repo.wjoin(f)):
162 162 util.unlinkpath(repo.wjoin(f))
163 163 lfdirstate.remove(f)
164 164 lfdirstate.write()
165 165 forget = [lfutil.standin(f) for f in forget]
166 166 remove = [lfutil.standin(f) for f in remove]
167 167 lfutil.repo_forget(repo, forget)
168 168 # If this is being called by addremove, let the original addremove
169 169 # function handle this.
170 170 if not getattr(repo, "_isaddremove", False):
171 171 lfutil.repo_remove(repo, remove, unlink=True)
172 172 finally:
173 173 wlock.release()
174 174
175 175 # -- Wrappers: modify existing commands --------------------------------
176 176
177 177 # Add works by going through the files that the user wanted to add and
178 178 # checking if they should be added as largefiles. Then it makes a new
179 179 # matcher which matches only the normal files and runs the original
180 180 # version of add.
181 181 def override_add(orig, ui, repo, *pats, **opts):
182 182 normal = opts.pop('normal')
183 183 if normal:
184 184 if opts.get('large'):
185 185 raise util.Abort(_('--normal cannot be used with --large'))
186 186 return orig(ui, repo, *pats, **opts)
187 187 bad = add_largefiles(ui, repo, *pats, **opts)
188 188 installnormalfilesmatchfn(repo[None].manifest())
189 189 result = orig(ui, repo, *pats, **opts)
190 190 restorematchfn()
191 191
192 192 return (result == 1 or bad) and 1 or 0
193 193
194 194 def override_remove(orig, ui, repo, *pats, **opts):
195 195 installnormalfilesmatchfn(repo[None].manifest())
196 196 orig(ui, repo, *pats, **opts)
197 197 restorematchfn()
198 198 remove_largefiles(ui, repo, *pats, **opts)
199 199
200 200 def override_status(orig, ui, repo, *pats, **opts):
201 201 try:
202 202 repo.lfstatus = True
203 203 return orig(ui, repo, *pats, **opts)
204 204 finally:
205 205 repo.lfstatus = False
206 206
207 207 def override_log(orig, ui, repo, *pats, **opts):
208 208 try:
209 209 repo.lfstatus = True
210 210 orig(ui, repo, *pats, **opts)
211 211 finally:
212 212 repo.lfstatus = False
213 213
214 214 def override_verify(orig, ui, repo, *pats, **opts):
215 215 large = opts.pop('large', False)
216 216 all = opts.pop('lfa', False)
217 217 contents = opts.pop('lfc', False)
218 218
219 219 result = orig(ui, repo, *pats, **opts)
220 220 if large:
221 221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 222 return result
223 223
224 224 # Override needs to refresh standins so that update's normal merge
225 225 # will go through properly. Then the other update hook (overriding repo.update)
226 226 # will get the new files. Filemerge is also overridden so that the merge
227 227 # will merge standins correctly.
228 228 def override_update(orig, ui, repo, *pats, **opts):
229 229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 231 False, False)
232 232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233 233
234 234 # Need to lock between the standins getting updated and their
235 235 # largefiles getting updated
236 236 wlock = repo.wlock()
237 237 try:
238 238 if opts['check']:
239 239 mod = len(modified) > 0
240 240 for lfile in unsure:
241 241 standin = lfutil.standin(lfile)
242 242 if repo['.'][standin].data().strip() != \
243 243 lfutil.hashfile(repo.wjoin(lfile)):
244 244 mod = True
245 245 else:
246 246 lfdirstate.normal(lfile)
247 247 lfdirstate.write()
248 248 if mod:
249 249 raise util.Abort(_('uncommitted local changes'))
250 250 # XXX handle removed differently
251 251 if not opts['clean']:
252 252 for lfile in unsure + modified + added:
253 253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 254 finally:
255 255 wlock.release()
256 256 return orig(ui, repo, *pats, **opts)
257 257
258 258 # Before starting the manifest merge, merge.updates will call
259 259 # _checkunknown to check if there are any files in the merged-in
260 260 # changeset that collide with unknown files in the working copy.
261 261 #
262 262 # The largefiles are seen as unknown, so this prevents us from merging
263 263 # in a file 'foo' if we already have a largefile with the same name.
264 264 #
265 265 # The overridden function filters the unknown files by removing any
266 266 # largefiles. This makes the merge proceed and we can then handle this
267 267 # case further in the overridden manifestmerge function below.
268 268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
269 269 if lfutil.standin(f) in wctx:
270 270 return False
271 271 return origfn(repo, wctx, mctx, f)
272 272
273 273 # The manifest merge handles conflicts on the manifest level. We want
274 274 # to handle changes in largefile-ness of files at this level too.
275 275 #
276 276 # The strategy is to run the original manifestmerge and then process
277 277 # the action list it outputs. There are two cases we need to deal with:
278 278 #
279 279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 280 # detected via its standin file, which will enter the working copy
281 281 # with a "get" action. It is not "merge" since the standin is all
282 282 # Mercurial is concerned with at this level -- the link to the
283 283 # existing normal file is not relevant here.
284 284 #
285 285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 286 # since the largefile will be present in the working copy and
287 287 # different from the normal file in p2. Mercurial therefore
288 288 # triggers a merge action.
289 289 #
290 290 # In both cases, we prompt the user and emit new actions to either
291 291 # remove the standin (if the normal file was kept) or to remove the
292 292 # normal file and get the standin (if the largefile was kept). The
293 293 # default prompt answer is to use the largefile version since it was
294 294 # presumably changed on purpose.
295 295 #
296 296 # Finally, the merge.applyupdates function will then take care of
297 297 # writing the files into the working copy and lfcommands.updatelfiles
298 298 # will update the largefiles.
299 299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300 300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
301 301 processed = []
302 302
303 303 for action in actions:
304 304 if overwrite:
305 305 processed.append(action)
306 306 continue
307 307 f, m = action[:2]
308 308
309 309 choices = (_('&Largefile'), _('&Normal file'))
310 310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311 311 # Case 1: normal file in the working copy, largefile in
312 312 # the second parent
313 313 lfile = lfutil.splitstandin(f)
314 314 standin = f
315 315 msg = _('%s has been turned into a largefile\n'
316 316 'use (l)argefile or keep as (n)ormal file?') % lfile
317 317 if repo.ui.promptchoice(msg, choices, 0) == 0:
318 318 processed.append((lfile, "r"))
319 319 processed.append((standin, "g", p2.flags(standin)))
320 320 else:
321 321 processed.append((standin, "r"))
322 322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323 323 # Case 2: largefile in the working copy, normal file in
324 324 # the second parent
325 325 standin = lfutil.standin(f)
326 326 lfile = f
327 327 msg = _('%s has been turned into a normal file\n'
328 328 'keep as (l)argefile or use (n)ormal file?') % lfile
329 329 if repo.ui.promptchoice(msg, choices, 0) == 0:
330 330 processed.append((lfile, "r"))
331 331 else:
332 332 processed.append((standin, "r"))
333 333 processed.append((lfile, "g", p2.flags(lfile)))
334 334 else:
335 335 processed.append(action)
336 336
337 337 return processed
338 338
339 339 # Override filemerge to prompt the user about how they wish to merge
340 340 # largefiles. This will handle identical edits, and copy/rename +
341 341 # edit without prompting the user.
342 342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
343 343 # Use better variable names here. Because this is a wrapper we cannot
344 344 # change the variable names in the function declaration.
345 345 fcdest, fcother, fcancestor = fcd, fco, fca
346 346 if not lfutil.isstandin(orig):
347 347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
348 348 else:
349 349 if not fcother.cmp(fcdest): # files identical?
350 350 return None
351 351
352 352 # backwards, use working dir parent as ancestor
353 353 if fcancestor == fcother:
354 354 fcancestor = fcdest.parents()[0]
355 355
356 356 if orig != fcother.path():
357 357 repo.ui.status(_('merging %s and %s to %s\n')
358 358 % (lfutil.splitstandin(orig),
359 359 lfutil.splitstandin(fcother.path()),
360 360 lfutil.splitstandin(fcdest.path())))
361 361 else:
362 362 repo.ui.status(_('merging %s\n')
363 363 % lfutil.splitstandin(fcdest.path()))
364 364
365 365 if fcancestor.path() != fcother.path() and fcother.data() == \
366 366 fcancestor.data():
367 367 return 0
368 368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
369 369 fcancestor.data():
370 370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
371 371 return 0
372 372
373 373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
374 374 'keep (l)ocal or take (o)ther?') %
375 375 lfutil.splitstandin(orig),
376 376 (_('&Local'), _('&Other')), 0) == 0:
377 377 return 0
378 378 else:
379 379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
380 380 return 0
381 381
382 382 # Copy first changes the matchers to match standins instead of
383 383 # largefiles. Then it overrides util.copyfile in that function it
384 384 # checks if the destination largefile already exists. It also keeps a
385 385 # list of copied files so that the largefiles can be copied and the
386 386 # dirstate updated.
387 387 def override_copy(orig, ui, repo, pats, opts, rename=False):
388 388 # doesn't remove largefile on rename
389 389 if len(pats) < 2:
390 390 # this isn't legal, let the original function deal with it
391 391 return orig(ui, repo, pats, opts, rename)
392 392
393 393 def makestandin(relpath):
394 394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
395 395 return os.path.join(repo.wjoin(lfutil.standin(path)))
396 396
397 397 fullpats = scmutil.expandpats(pats)
398 398 dest = fullpats[-1]
399 399
400 400 if os.path.isdir(dest):
401 401 if not os.path.isdir(makestandin(dest)):
402 402 os.makedirs(makestandin(dest))
403 403 # This could copy both lfiles and normal files in one command,
404 404 # but we don't want to do that. First replace their matcher to
405 405 # only match normal files and run it, then replace it to just
406 406 # match largefiles and run it again.
407 407 nonormalfiles = False
408 408 nolfiles = False
409 409 try:
410 410 try:
411 411 installnormalfilesmatchfn(repo[None].manifest())
412 412 result = orig(ui, repo, pats, opts, rename)
413 413 except util.Abort, e:
414 414 if str(e) != 'no files to copy':
415 415 raise e
416 416 else:
417 417 nonormalfiles = True
418 418 result = 0
419 419 finally:
420 420 restorematchfn()
421 421
422 422 # The first rename can cause our current working directory to be removed.
423 423 # In that case there is nothing left to copy/rename so just quit.
424 424 try:
425 425 repo.getcwd()
426 426 except OSError:
427 427 return result
428 428
429 429 try:
430 430 try:
431 431 # When we call orig below it creates the standins but we don't add them
432 432 # to the dir state until later so lock during that time.
433 433 wlock = repo.wlock()
434 434
435 435 manifest = repo[None].manifest()
436 436 oldmatch = None # for the closure
437 437 def override_match(ctx, pats=[], opts={}, globbed=False,
438 438 default='relpath'):
439 439 newpats = []
440 440 # The patterns were previously mangled to add the standin
441 441 # directory; we need to remove that now
442 442 for pat in pats:
443 443 if match_.patkind(pat) is None and lfutil.shortname in pat:
444 444 newpats.append(pat.replace(lfutil.shortname, ''))
445 445 else:
446 446 newpats.append(pat)
447 447 match = oldmatch(ctx, newpats, opts, globbed, default)
448 448 m = copy.copy(match)
449 449 lfile = lambda f: lfutil.standin(f) in manifest
450 450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
451 451 m._fmap = set(m._files)
452 452 orig_matchfn = m.matchfn
453 453 m.matchfn = lambda f: (lfutil.isstandin(f) and
454 454 (f in manifest) and
455 455 orig_matchfn(lfutil.splitstandin(f)) or
456 456 None)
457 457 return m
458 458 oldmatch = installmatchfn(override_match)
459 459 listpats = []
460 460 for pat in pats:
461 461 if match_.patkind(pat) is not None:
462 462 listpats.append(pat)
463 463 else:
464 464 listpats.append(makestandin(pat))
465 465
466 466 try:
467 467 origcopyfile = util.copyfile
468 468 copiedfiles = []
469 469 def override_copyfile(src, dest):
470 470 if (lfutil.shortname in src and
471 471 dest.startswith(repo.wjoin(lfutil.shortname))):
472 472 destlfile = dest.replace(lfutil.shortname, '')
473 473 if not opts['force'] and os.path.exists(destlfile):
474 474 raise IOError('',
475 475 _('destination largefile already exists'))
476 476 copiedfiles.append((src, dest))
477 477 origcopyfile(src, dest)
478 478
479 479 util.copyfile = override_copyfile
480 480 result += orig(ui, repo, listpats, opts, rename)
481 481 finally:
482 482 util.copyfile = origcopyfile
483 483
484 484 lfdirstate = lfutil.openlfdirstate(ui, repo)
485 485 for (src, dest) in copiedfiles:
486 486 if (lfutil.shortname in src and
487 487 dest.startswith(repo.wjoin(lfutil.shortname))):
488 488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
489 489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
490 490 destlfiledir = os.path.dirname(destlfile) or '.'
491 491 if not os.path.isdir(destlfiledir):
492 492 os.makedirs(destlfiledir)
493 493 if rename:
494 494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
495 495 lfdirstate.remove(srclfile)
496 496 else:
497 497 util.copyfile(srclfile, destlfile)
498 498 lfdirstate.add(destlfile)
499 499 lfdirstate.write()
500 500 except util.Abort, e:
501 501 if str(e) != 'no files to copy':
502 502 raise e
503 503 else:
504 504 nolfiles = True
505 505 finally:
506 506 restorematchfn()
507 507 wlock.release()
508 508
509 509 if nolfiles and nonormalfiles:
510 510 raise util.Abort(_('no files to copy'))
511 511
512 512 return result
513 513
514 514 # When the user calls revert, we have to be careful to not revert any
515 515 # changes to other largefiles accidentally. This means we have to keep
516 516 # track of the largefiles that are being reverted so we only pull down
517 517 # the necessary largefiles.
518 518 #
519 519 # Standins are only updated (to match the hash of largefiles) before
520 520 # commits. Update the standins then run the original revert, changing
521 521 # the matcher to hit standins instead of largefiles. Based on the
522 522 # resulting standins update the largefiles. Then return the standins
523 523 # to their proper state
524 524 def override_revert(orig, ui, repo, *pats, **opts):
525 525 # Because we put the standins in a bad state (by updating them)
526 526 # and then return them to a correct state we need to lock to
527 527 # prevent others from changing them in their incorrect state.
528 528 wlock = repo.wlock()
529 529 try:
530 530 lfdirstate = lfutil.openlfdirstate(ui, repo)
531 531 (modified, added, removed, missing, unknown, ignored, clean) = \
532 532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
533 533 for lfile in modified:
534 534 lfutil.updatestandin(repo, lfutil.standin(lfile))
535 535 for lfile in missing:
536 536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
537 537
538 538 try:
539 539 ctx = repo[opts.get('rev')]
540 540 oldmatch = None # for the closure
541 541 def override_match(ctx, pats=[], opts={}, globbed=False,
542 542 default='relpath'):
543 543 match = oldmatch(ctx, pats, opts, globbed, default)
544 544 m = copy.copy(match)
545 545 def tostandin(f):
546 546 if lfutil.standin(f) in ctx:
547 547 return lfutil.standin(f)
548 548 elif lfutil.standin(f) in repo[None]:
549 549 return None
550 550 return f
551 551 m._files = [tostandin(f) for f in m._files]
552 552 m._files = [f for f in m._files if f is not None]
553 553 m._fmap = set(m._files)
554 554 orig_matchfn = m.matchfn
555 555 def matchfn(f):
556 556 if lfutil.isstandin(f):
557 557 # We need to keep track of what largefiles are being
558 558 # matched so we know which ones to update later --
559 559 # otherwise we accidentally revert changes to other
560 560 # largefiles. This is repo-specific, so duckpunch the
561 561 # repo object to keep the list of largefiles for us
562 562 # later.
563 563 if orig_matchfn(lfutil.splitstandin(f)) and \
564 564 (f in repo[None] or f in ctx):
565 565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 566 lfileslist.append(lfutil.splitstandin(f))
567 567 repo._lfilestoupdate = lfileslist
568 568 return True
569 569 else:
570 570 return False
571 571 return orig_matchfn(f)
572 572 m.matchfn = matchfn
573 573 return m
574 574 oldmatch = installmatchfn(override_match)
575 575 scmutil.match
576 576 matches = override_match(repo[None], pats, opts)
577 577 orig(ui, repo, *pats, **opts)
578 578 finally:
579 579 restorematchfn()
580 580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 582 printmessage=False)
583 583
584 584 # empty out the largefiles list so we start fresh next time
585 585 repo._lfilestoupdate = []
586 586 for lfile in modified:
587 587 if lfile in lfileslist:
588 588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 589 in repo['.']:
590 590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 591 repo['.'][lfile].data().strip(),
592 592 'x' in repo['.'][lfile].flags())
593 593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 594 for lfile in added:
595 595 standin = lfutil.standin(lfile)
596 596 if standin not in ctx and (standin in matches or opts.get('all')):
597 597 if lfile in lfdirstate:
598 598 lfdirstate.drop(lfile)
599 599 util.unlinkpath(repo.wjoin(standin))
600 600 lfdirstate.write()
601 601 finally:
602 602 wlock.release()
603 603
604 604 def hg_update(orig, repo, node):
605 # In order to not waste a lot of extra time during the update largefiles
606 # step, we keep track of the state of the standins before and after we
607 # call the original update function, and only update the standins that
608 # have changed in the hg.update() call
605 # Only call updatelfiles on the standins that have changed to save time
609 606 oldstandins = lfutil.getstandinsstate(repo)
610 607 result = orig(repo, node)
611 608 newstandins = lfutil.getstandinsstate(repo)
612 tobeupdated = set(oldstandins).symmetric_difference(set(newstandins))
613 filelist = []
614 for f in tobeupdated:
615 if f[0] not in filelist:
616 filelist.append(f[0])
617
609 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
618 610 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
619 611 return result
620 612
621 613 def hg_clean(orig, repo, node, show_stats=True):
622 614 result = orig(repo, node, show_stats)
623 615 lfcommands.updatelfiles(repo.ui, repo)
624 616 return result
625 617
626 618 def hg_merge(orig, repo, node, force=None, remind=True):
627 619 # Mark the repo as being in the middle of a merge, so that
628 620 # updatelfiles() will know that it needs to trust the standins in
629 621 # the working copy, not in the standins in the current node
630 622 repo._ismerging = True
631 623 try:
632 624 result = orig(repo, node, force, remind)
633 625 lfcommands.updatelfiles(repo.ui, repo)
634 626 finally:
635 627 repo._ismerging = False
636 628 return result
637 629
638 630 # When we rebase a repository with remotely changed largefiles, we need to
639 631 # take some extra care so that the largefiles are correctly updated in the
640 632 # working copy
641 633 def override_pull(orig, ui, repo, source=None, **opts):
642 634 if opts.get('rebase', False):
643 635 repo._isrebasing = True
644 636 try:
645 637 if opts.get('update'):
646 638 del opts['update']
647 639 ui.debug('--update and --rebase are not compatible, ignoring '
648 640 'the update flag\n')
649 641 del opts['rebase']
650 642 cmdutil.bailifchanged(repo)
651 643 revsprepull = len(repo)
652 644 origpostincoming = commands.postincoming
653 645 def _dummy(*args, **kwargs):
654 646 pass
655 647 commands.postincoming = _dummy
656 648 repo.lfpullsource = source
657 649 if not source:
658 650 source = 'default'
659 651 try:
660 652 result = commands.pull(ui, repo, source, **opts)
661 653 finally:
662 654 commands.postincoming = origpostincoming
663 655 revspostpull = len(repo)
664 656 if revspostpull > revsprepull:
665 657 result = result or rebase.rebase(ui, repo)
666 658 finally:
667 659 repo._isrebasing = False
668 660 else:
669 661 repo.lfpullsource = source
670 662 if not source:
671 663 source = 'default'
672 664 oldheads = lfutil.getcurrentheads(repo)
673 665 result = orig(ui, repo, source, **opts)
674 666 # If we do not have the new largefiles for any new heads we pulled, we
675 667 # will run into a problem later if we try to merge or rebase with one of
676 668 # these heads, so cache the largefiles now directly into the system
677 669 # cache.
678 670 ui.status(_("caching new largefiles\n"))
679 671 numcached = 0
680 672 heads = lfutil.getcurrentheads(repo)
681 673 newheads = set(heads).difference(set(oldheads))
682 674 for head in newheads:
683 675 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
684 676 numcached += len(cached)
685 677 ui.status(_("%d largefiles cached\n") % numcached)
686 678 return result
687 679
688 680 def override_rebase(orig, ui, repo, **opts):
689 681 repo._isrebasing = True
690 682 try:
691 683 orig(ui, repo, **opts)
692 684 finally:
693 685 repo._isrebasing = False
694 686
695 687 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
696 688 prefix=None, mtime=None, subrepos=None):
697 689 # No need to lock because we are only reading history and
698 690 # largefile caches, neither of which are modified.
699 691 lfcommands.cachelfiles(repo.ui, repo, node)
700 692
701 693 if kind not in archival.archivers:
702 694 raise util.Abort(_("unknown archive type '%s'") % kind)
703 695
704 696 ctx = repo[node]
705 697
706 698 if kind == 'files':
707 699 if prefix:
708 700 raise util.Abort(
709 701 _('cannot give prefix when archiving to files'))
710 702 else:
711 703 prefix = archival.tidyprefix(dest, kind, prefix)
712 704
713 705 def write(name, mode, islink, getdata):
714 706 if matchfn and not matchfn(name):
715 707 return
716 708 data = getdata()
717 709 if decode:
718 710 data = repo.wwritedata(name, data)
719 711 archiver.addfile(prefix + name, mode, islink, data)
720 712
721 713 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
722 714
723 715 if repo.ui.configbool("ui", "archivemeta", True):
724 716 def metadata():
725 717 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
726 718 hex(repo.changelog.node(0)), hex(node), ctx.branch())
727 719
728 720 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
729 721 if repo.tagtype(t) == 'global')
730 722 if not tags:
731 723 repo.ui.pushbuffer()
732 724 opts = {'template': '{latesttag}\n{latesttagdistance}',
733 725 'style': '', 'patch': None, 'git': None}
734 726 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
735 727 ltags, dist = repo.ui.popbuffer().split('\n')
736 728 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
737 729 tags += 'latesttagdistance: %s\n' % dist
738 730
739 731 return base + tags
740 732
741 733 write('.hg_archival.txt', 0644, False, metadata)
742 734
743 735 for f in ctx:
744 736 ff = ctx.flags(f)
745 737 getdata = ctx[f].data
746 738 if lfutil.isstandin(f):
747 739 path = lfutil.findfile(repo, getdata().strip())
748 740 if path is None:
749 741 raise util.Abort(
750 742 _('largefile %s not found in repo store or system cache')
751 743 % lfutil.splitstandin(f))
752 744 f = lfutil.splitstandin(f)
753 745
754 746 def getdatafn():
755 747 fd = None
756 748 try:
757 749 fd = open(path, 'rb')
758 750 return fd.read()
759 751 finally:
760 752 if fd:
761 753 fd.close()
762 754
763 755 getdata = getdatafn
764 756 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
765 757
766 758 if subrepos:
767 759 for subpath in ctx.substate:
768 760 sub = ctx.sub(subpath)
769 761 sub.archive(repo.ui, archiver, prefix)
770 762
771 763 archiver.done()
772 764
773 765 # If a largefile is modified, the change is not reflected in its
774 766 # standin until a commit. cmdutil.bailifchanged() raises an exception
775 767 # if the repo has uncommitted changes. Wrap it to also check if
776 768 # largefiles were changed. This is used by bisect and backout.
777 769 def override_bailifchanged(orig, repo):
778 770 orig(repo)
779 771 repo.lfstatus = True
780 772 modified, added, removed, deleted = repo.status()[:4]
781 773 repo.lfstatus = False
782 774 if modified or added or removed or deleted:
783 775 raise util.Abort(_('outstanding uncommitted changes'))
784 776
785 777 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
786 778 def override_fetch(orig, ui, repo, *pats, **opts):
787 779 repo.lfstatus = True
788 780 modified, added, removed, deleted = repo.status()[:4]
789 781 repo.lfstatus = False
790 782 if modified or added or removed or deleted:
791 783 raise util.Abort(_('outstanding uncommitted changes'))
792 784 return orig(ui, repo, *pats, **opts)
793 785
794 786 def override_forget(orig, ui, repo, *pats, **opts):
795 787 installnormalfilesmatchfn(repo[None].manifest())
796 788 orig(ui, repo, *pats, **opts)
797 789 restorematchfn()
798 790 m = scmutil.match(repo[None], pats, opts)
799 791
800 792 try:
801 793 repo.lfstatus = True
802 794 s = repo.status(match=m, clean=True)
803 795 finally:
804 796 repo.lfstatus = False
805 797 forget = sorted(s[0] + s[1] + s[3] + s[6])
806 798 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
807 799
808 800 for f in forget:
809 801 if lfutil.standin(f) not in repo.dirstate and not \
810 802 os.path.isdir(m.rel(lfutil.standin(f))):
811 803 ui.warn(_('not removing %s: file is already untracked\n')
812 804 % m.rel(f))
813 805
814 806 for f in forget:
815 807 if ui.verbose or not m.exact(f):
816 808 ui.status(_('removing %s\n') % m.rel(f))
817 809
818 810 # Need to lock because standin files are deleted then removed from the
819 811 # repository and we could race in between.
820 812 wlock = repo.wlock()
821 813 try:
822 814 lfdirstate = lfutil.openlfdirstate(ui, repo)
823 815 for f in forget:
824 816 if lfdirstate[f] == 'a':
825 817 lfdirstate.drop(f)
826 818 else:
827 819 lfdirstate.remove(f)
828 820 lfdirstate.write()
829 821 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
830 822 unlink=True)
831 823 finally:
832 824 wlock.release()
833 825
834 826 def getoutgoinglfiles(ui, repo, dest=None, **opts):
835 827 dest = ui.expandpath(dest or 'default-push', dest or 'default')
836 828 dest, branches = hg.parseurl(dest, opts.get('branch'))
837 829 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
838 830 if revs:
839 831 revs = [repo.lookup(rev) for rev in revs]
840 832
841 833 remoteui = hg.remoteui
842 834
843 835 try:
844 836 remote = hg.repository(remoteui(repo, opts), dest)
845 837 except error.RepoError:
846 838 return None
847 839 o = lfutil.findoutgoing(repo, remote, False)
848 840 if not o:
849 841 return None
850 842 o = repo.changelog.nodesbetween(o, revs)[0]
851 843 if opts.get('newest_first'):
852 844 o.reverse()
853 845
854 846 toupload = set()
855 847 for n in o:
856 848 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
857 849 ctx = repo[n]
858 850 files = set(ctx.files())
859 851 if len(parents) == 2:
860 852 mc = ctx.manifest()
861 853 mp1 = ctx.parents()[0].manifest()
862 854 mp2 = ctx.parents()[1].manifest()
863 855 for f in mp1:
864 856 if f not in mc:
865 857 files.add(f)
866 858 for f in mp2:
867 859 if f not in mc:
868 860 files.add(f)
869 861 for f in mc:
870 862 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
871 863 files.add(f)
872 864 toupload = toupload.union(
873 865 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
874 866 return toupload
875 867
876 868 def override_outgoing(orig, ui, repo, dest=None, **opts):
877 869 orig(ui, repo, dest, **opts)
878 870
879 871 if opts.pop('large', None):
880 872 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
881 873 if toupload is None:
882 874 ui.status(_('largefiles: No remote repo\n'))
883 875 else:
884 876 ui.status(_('largefiles to upload:\n'))
885 877 for file in toupload:
886 878 ui.status(lfutil.splitstandin(file) + '\n')
887 879 ui.status('\n')
888 880
889 881 def override_summary(orig, ui, repo, *pats, **opts):
890 882 try:
891 883 repo.lfstatus = True
892 884 orig(ui, repo, *pats, **opts)
893 885 finally:
894 886 repo.lfstatus = False
895 887
896 888 if opts.pop('large', None):
897 889 toupload = getoutgoinglfiles(ui, repo, None, **opts)
898 890 if toupload is None:
899 891 ui.status(_('largefiles: No remote repo\n'))
900 892 else:
901 893 ui.status(_('largefiles: %d to upload\n') % len(toupload))
902 894
903 895 def override_addremove(orig, ui, repo, *pats, **opts):
904 896 # Get the list of missing largefiles so we can remove them
905 897 lfdirstate = lfutil.openlfdirstate(ui, repo)
906 898 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
907 899 False, False)
908 900 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
909 901
910 902 # Call into the normal remove code, but let the original addremove handle
911 903 # the removal of the standins. Monkey patching here makes sure
912 904 # we don't remove the standin in the largefiles code, preventing a very
913 905 # confused state later.
914 906 if missing:
915 907 repo._isaddremove = True
916 908 remove_largefiles(ui, repo, *missing, **opts)
917 909 repo._isaddremove = False
918 910 # Call into the normal add code, and any files that *should* be added as
919 911 # largefiles will be
920 912 add_largefiles(ui, repo, *pats, **opts)
921 913 # Now that we've handled largefiles, hand off to the original addremove
922 914 # function to take care of the rest. Make sure it doesn't do anything with
923 915 # largefiles by installing a matcher that will ignore them.
924 916 installnormalfilesmatchfn(repo[None].manifest())
925 917 result = orig(ui, repo, *pats, **opts)
926 918 restorematchfn()
927 919 return result
928 920
929 921 # Calling purge with --all will cause the largefiles to be deleted.
930 922 # Override repo.status to prevent this from happening.
931 923 def override_purge(orig, ui, repo, *dirs, **opts):
932 924 oldstatus = repo.status
933 925 def override_status(node1='.', node2=None, match=None, ignored=False,
934 926 clean=False, unknown=False, listsubrepos=False):
935 927 r = oldstatus(node1, node2, match, ignored, clean, unknown,
936 928 listsubrepos)
937 929 lfdirstate = lfutil.openlfdirstate(ui, repo)
938 930 modified, added, removed, deleted, unknown, ignored, clean = r
939 931 unknown = [f for f in unknown if lfdirstate[f] == '?']
940 932 ignored = [f for f in ignored if lfdirstate[f] == '?']
941 933 return modified, added, removed, deleted, unknown, ignored, clean
942 934 repo.status = override_status
943 935 orig(ui, repo, *dirs, **opts)
944 936 repo.status = oldstatus
945 937
946 938 def override_rollback(orig, ui, repo, **opts):
947 939 result = orig(ui, repo, **opts)
948 940 merge.update(repo, node=None, branchmerge=False, force=True,
949 941 partial=lfutil.isstandin)
950 942 wlock = repo.wlock()
951 943 try:
952 944 lfdirstate = lfutil.openlfdirstate(ui, repo)
953 945 lfiles = lfutil.listlfiles(repo)
954 946 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
955 947 for file in lfiles:
956 948 if file in oldlfiles:
957 949 lfdirstate.normallookup(file)
958 950 else:
959 951 lfdirstate.add(file)
960 952 lfdirstate.write()
961 953 finally:
962 954 wlock.release()
963 955 return result
964 956
965 957 def override_transplant(orig, ui, repo, *revs, **opts):
966 958 try:
967 959 repo._istransplanting = True
968 960 result = orig(ui, repo, *revs, **opts)
969 961 lfcommands.updatelfiles(ui, repo, filelist=None,
970 962 printmessage=False)
971 963 finally:
972 964 repo._istransplanting = False
973 965 return result
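
For readability, the hg_update() wrapper as it reads after this change, reassembled from new lines 605-611 in the hunk above (a sketch of the resulting code path, not a verbatim extract of the final file; the comments are editorial):

    def hg_update(orig, repo, node):
        # Snapshot the standin state, run the wrapped update, then refresh
        # only the largefiles whose standins actually changed.
        oldstandins = lfutil.getstandinsstate(repo)
        result = orig(repo, node)
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=True)
        return result
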