##// END OF EJS Templates
largefiles: don't break existing tests (syntax error, bad imports)
Greg Ward -
r15188:8e115063 default
parent child Browse files
Show More
@@ -1,497 +1,497 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import inspect
14 14 import shutil
15 15 import stat
16 16 import hashlib
17 17
18 18 from mercurial import cmdutil, dirstate, httpconnection, match as match_, \
19 19 url as url_, util
20 20 from mercurial.i18n import _
21 21
22 22 try:
23 23 from mercurial import scmutil
24 24 except ImportError:
25 25 pass
26 26
# Directory name (inside the working copy) under which standin files live.
shortname = '.hglf'
# Name of the largefiles directory under .hg/ and of the config section.
longname = 'largefiles'
29 29
30 30
31 31 # -- Portability wrappers ----------------------------------------------
32 32
if 'subrepos' not in inspect.getargspec(dirstate.dirstate.status)[0]:
    # for Mercurial <= 1.4: walk() has no subrepos argument
    def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
        '''Walk the dirstate with matcher, papering over API drift.'''
        return dirstate.walk(matcher, unknown, ignored)
else:
    # for Mercurial >= 1.5: walk() grew a subrepos list parameter
    def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
        '''Walk the dirstate with matcher, papering over API drift.'''
        return dirstate.walk(matcher, [], unknown, ignored)
41 41
def repo_add(repo, list):
    '''Schedule the given files for addition, working with both the old
    (repo.add) and new (repo[None].add) Mercurial APIs.'''
    try:
        # Mercurial <= 1.5
        adder = repo.add
    except AttributeError:
        # Mercurial >= 1.6
        adder = repo[None].add
    return adder(list)
50 50
def repo_remove(repo, list, unlink=False):
    '''Remove the given files from repo, optionally unlinking them from
    the working directory too.  Papers over three generations of the
    Mercurial API:
      - Mercurial <= 1.5: repo.remove()
      - Mercurial 1.6-1.8: repo[None].remove()
      - Mercurial >= 1.9: no remove(); emulate with unlink + forget
    '''
    try:
        # Mercurial <= 1.5
        remove = repo.remove
    except AttributeError:
        # Mercurial >= 1.6
        try:
            # Mercurial <= 1.8
            remove = repo[None].remove
        except AttributeError:
            # Mercurial >= 1.9: emulate the old API under the wlock
            def remove(list, unlink):
                wlock = repo.wlock()
                try:
                    if unlink:
                        for f in list:
                            try:
                                util.unlinkpath(repo.wjoin(f))
                            except OSError, inst:
                                # a file that is already gone is fine
                                if inst.errno != errno.ENOENT:
                                    raise
                    repo[None].forget(list)
                finally:
                    wlock.release()

    return remove(list, unlink=unlink)
77 77
def repo_forget(repo, list):
    '''Forget the given files, working with both the old (repo.forget)
    and new (repo[None].forget) Mercurial APIs.'''
    try:
        # Mercurial <= 1.5
        forgetter = repo.forget
    except AttributeError:
        # Mercurial >= 1.6
        forgetter = repo[None].forget
    return forgetter(list)
86 86
def findoutgoing(repo, remote, force):
    '''Return the changesets present in repo but not in remote, across
    three generations of the discovery API.'''
    # First attempt is for Mercurial <= 1.5 second is for >= 1.6
    try:
        return repo.findoutgoing(remote)
    except AttributeError:
        from mercurial import discovery
        try:
            # Mercurial <= 1.8
            return discovery.findoutgoing(repo, remote, force=force)
        except AttributeError:
            # Mercurial >= 1.9: compute the common set, then ask the
            # changelog for everything remote is missing
            common, _anyinc, _heads = discovery.findcommonincoming(repo,
                remote, force=force)
            return repo.changelog.findmissing(common)
101 101
102 102 # -- Private worker functions ------------------------------------------
103 103
if os.name == 'nt':
    from mercurial import win32
    linkfn = win32.oslink
else:
    # BUG FIX: without this branch linkfn was only bound on Windows, so
    # link() raised NameError on every other platform.  Use the standard
    # POSIX hardlink primitive there.
    linkfn = os.link

def link(src, dest):
    '''Hardlink src to dest, falling back to a plain copy (preserving
    the source's mode bits) when hardlinking fails.'''
    try:
        linkfn(src, dest)
    except OSError:
        # If hardlinks fail fall back on copy
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
115 115
def systemcachepath(ui, hash):
    '''Location of hash in the per-user (system-wide) largefile cache,
    honoring the [largefiles] systemcache config option when set.'''
    configured = ui.config(longname, 'systemcache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA') or os.getenv('APPDATA')
        return os.path.join(appdata, longname, hash)
    if os.name == 'posix':
        return os.path.join(os.getenv('HOME'), '.' + longname, hash)
    raise util.Abort(_('Unknown operating system: %s\n') % os.name)
129 129
def insystemcache(ui, hash):
    '''True if hash is already present in the per-user system cache.'''
    return os.path.exists(systemcachepath(ui, hash))
132 132
def findfile(repo, hash):
    '''Return a filesystem path holding the largefile with the given
    hash -- checking the repo-local cache first, then the system
    cache -- or None when it is cached nowhere.'''
    if incache(repo, hash):
        repo.ui.note(_('Found %s in cache\n') % hash)
        return cachepath(repo, hash)
    elif insystemcache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        return systemcachepath(repo.ui, hash)
    return None
141 141
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass that canonicalizes every path to unix style
    before delegating, since the largefiles dirstate stores unix
    (slash-separated) paths.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
155 155
156 156 def openlfdirstate(ui, repo):
157 157 '''
158 158 Return a dirstate object that tracks big files: i.e. its root is the
159 159 repo root, but it is saved in .hg/largefiles/dirstate.
160 160 '''
161 161 admin = repo.join(longname)
162 162 try:
163 163 # Mercurial >= 1.9
164 164 opener = scmutil.opener(admin)
165 165 except ImportError:
166 166 # Mercurial <= 1.8
167 167 opener = util.opener(admin)
168 168 if util.safehasattr(repo.dirstate, '_validate'):
169 169 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
170 170 repo.dirstate._validate)
171 171 else:
172 172 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
173 173
174 174 # If the largefiles dirstate does not exist, populate and create it. This
175 175 # ensures that we create it on the first meaningful largefiles operation in
176 176 # a new clone. It also gives us an easy way to forcibly rebuild largefiles
177 177 # state:
178 178 # rm .hg/largefiles/dirstate && hg status
179 179 # Or even, if things are really messed up:
180 180 # rm -rf .hg/largefiles && hg status
181 181 if not os.path.exists(os.path.join(admin, 'dirstate')):
182 182 util.makedirs(admin)
183 183 matcher = getstandinmatcher(repo)
184 184 for standin in dirstate_walk(repo.dirstate, matcher):
185 185 lfile = splitstandin(standin)
186 186 hash = readstandin(repo, lfile)
187 187 lfdirstate.normallookup(lfile)
188 188 try:
189 189 if hash == hashfile(lfile):
190 190 lfdirstate.normal(lfile)
191 191 except IOError, err:
192 192 if err.errno != errno.ENOENT:
193 193 raise
194 194
195 195 lfdirstate.write()
196 196
197 197 return lfdirstate
198 198
def lfdirstate_status(lfdirstate, repo, rev):
    '''Run a status check of lfdirstate against revision rev, resolving
    "unsure" entries by comparing the standin's recorded hash with the
    working copy's actual contents.  Returns the usual 7-tuple of
    status lists.'''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            # unsure means the dirstate could not decide from metadata
            # alone; compare content hashes to classify the file
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
216 216
def listlfiles(repo, rev=None, matcher=None):
    '''list largefiles in the working copy or specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    lfiles = []
    for f in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
226 226
def incache(repo, hash):
    '''True if hash is present in the repo-local largefile cache.'''
    return os.path.exists(cachepath(repo, hash))
229 229
def createdir(dir):
    '''Create dir (including missing parents) unless it already exists.'''
    if os.path.exists(dir):
        return
    os.makedirs(dir)
233 233
def cachepath(repo, hash):
    '''Path of hash inside the repo-local cache (.hg/largefiles/<hash>).'''
    return repo.join(os.path.join(longname, hash))
236 236
def copyfromcache(repo, hash, filename):
    '''copyfromcache copies the specified largefile from the repo or system
    cache to the specified location in the repository. It will not throw an
    exception on failure, as it is meant to be called only after ensuring that
    the needed largefile exists in the cache.'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    shutil.copy(cached, repo.wjoin(filename))
    return True
248 248
def copytocache(repo, rev, file, uploaded=False):
    # Copy the working-copy largefile for 'file' into the local cache,
    # keyed by the hash recorded in its standin; no-op if already cached.
    # NOTE(review): 'rev' and 'uploaded' are unused in this body --
    # presumably kept for caller compatibility; confirm before removing.
    hash = readstandin(repo, file)
    if incache(repo, hash):
        return
    copytocacheabsolute(repo, repo.wjoin(file), hash)
254 254
def copytocacheabsolute(repo, file, hash):
    '''Store file (an absolute path) in the repo-local cache under hash:
    hardlink from the system cache when the file is already there,
    otherwise copy it in (preserving mode bits) and propagate it to the
    system cache.'''
    createdir(os.path.dirname(cachepath(repo, hash)))
    if insystemcache(repo.ui, hash):
        link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
    else:
        shutil.copyfile(file, cachepath(repo, hash))
        os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
        linktosystemcache(repo, hash)
263 263
def linktosystemcache(repo, hash):
    '''Hardlink (or copy, via link()) the locally cached largefile into
    the per-user system cache so other clones can share it.'''
    createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
    link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
267 267
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # BUG FIX: the defaults were the mutable literals [] and {}, which
    # are shared across calls; use None sentinels instead.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
288 288
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false, neuter
    the match object\'s bad() method so it does not print any warnings
    about missing files or directories.'''
    # BUG FIX: mutable default arguments [] / {} replaced with None
    # sentinels so no state can leak between calls.
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    try:
        # Mercurial >= 1.9
        match = scmutil.match(repo[None], pats, opts)
    except (ImportError, NameError):
        # Mercurial <= 1.8.  BUG FIX: the guarded top-level import leaves
        # 'scmutil' undefined on old versions, so the reference above
        # raises NameError, which a bare ImportError never caught.
        match = cmdutil.match(repo, pats, opts)

    if not showbad:
        match.bad = lambda f, msg: None
    return match
303 303
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the files
    accepted by rmatcher. Pass the list of files in the matcher as the
    paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    smatcher.matchfn = (
        lambda f: isstandin(f) and rmatcher.matchfn(splitstandin(f)))
    return smatcher
315 315
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')
327 327
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must
    be in Mercurial\'s internal form (slash-separated).'''
    return filename.startswith(shortname + '/')
332 332
def splitstandin(filename):
    '''Return the largefile path encoded by the given standin path, or
    None when filename is not a standin.  Splits on '/' because that is
    what dirstate always uses, even on Windows; local separators are
    converted first in case the name came from an external source
    (like the command line).'''
    prefix, sep, rest = filename.replace(os.sep, '/').partition('/')
    if sep and prefix == shortname:
        return rest
    return None
342 342
def updatestandin(repo, standin):
    '''Re-hash the working-copy largefile for standin and rewrite the
    standin file accordingly; no-op when the largefile is absent.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
349 349
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()
354 354
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
358 358
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
374 374
def hashrepofile(repo, file):
    '''Return the hex hash of the named repo-relative working-copy file.'''
    return hashfile(repo.wjoin(file))
377 377
def hashfile(file):
    '''Return the hex SHA-1 digest of the named file's contents, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        # BUG FIX: blockstream() only closes fd when fully consumed, so a
        # read error used to leak the descriptor; close unconditionally.
        for data in blockstream(fd):
            hasher.update(data)
    finally:
        fd.close()
    return hasher.hexdigest()
387 387
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of f.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit.'''
        if not self.limit:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        '''Deliberately a no-op: the underlying file belongs to the caller.'''
        pass
402 402
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    data = infile.read(blocksize)
    while data:
        yield data
        data = infile.read(blocksize)
    # Same blecch as above.
    infile.close()
412 412
def readhash(filename):
    '''Read and return the 40-character hex hash stored in filename;
    abort when the file is too short to contain one.'''
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) == 40:
        return hash
    raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                     % (filename, len(hash)))
421 421
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed, replacing any existing file, and setting the
    permission bits according to the executable flag.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    # re-check: the file could conceivably be gone if the write failed
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
435 435
def getexecutable(filename):
    '''Return whether filename is executable by user, group and other.
    (Result is truthy/falsy, not necessarily a bool.)'''
    mode = os.stat(filename).st_mode
    return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) \
        and (mode & stat.S_IXOTH)
440 440
def getmode(executable):
    '''Map the executable flag to a Unix permission mode.'''
    # 0755/0644 are Python 2 octal literals (this module predates 0o).
    if executable:
        return 0755
    else:
        return 0644
446 446
def urljoin(first, second, *arg):
    '''Join two or more URL components, guaranteeing exactly one slash
    between each adjacent pair.'''
    def glue(left, right):
        if left.endswith('/'):
            left = left[:-1]
        if not right.startswith('/'):
            right = '/' + right
        return left + right

    url = glue(first, second)
    for piece in arg:
        url = glue(url, piece)
    return url
459 459
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
467 467
def httpsendfile(ui, filename):
    '''Open filename as an HTTP upload source, accounting for the API
    moving between Mercurial versions.'''
    try:
        # Mercurial >= 1.9
        return httpconnection.httpsendfile(ui, filename, 'rb')
    except ImportError:
        # NOTE(review): httpconnection is imported unconditionally at the
        # top of this module, so it is unclear how this ImportError could
        # fire -- confirm against Mercurial <= 1.8 before relying on it.
        if 'ui' in inspect.getargspec(url_.httpsendfile.__init__)[0]:
            # Mercurial == 1.8
            return url_.httpsendfile(ui, filename, 'rb')
        else:
            # Mercurial <= 1.7
            return url_.httpsendfile(filename, 'rb')
479 479
def unixpath(path):
    '''Convert a path to unix style (forward slashes), the canonical
    form handed to the lfdirstate.'''
    return '/'.join(os.path.normpath(path).split(os.sep))
484 484
def islfilesrepo(repo):
    '''True when repo carries the largefiles requirement and at least
    one standin is present in its store.'''
    return ('largefiles' in repo.requirements and
        any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
488 488
def any_(gen):
    '''Return True when any element of gen is truthy (a local stand-in
    for the any() builtin).'''
    for item in gen:
        if item:
            return True
    return False
494 494
# NOTE(review): derives from BaseException rather than Exception, so a
# generic 'except Exception' will not swallow it -- confirm this is
# deliberate before changing the base class.
class storeprotonotcapable(BaseException):
    '''Raised when no store supports any of the requested store types.'''
    def __init__(self, storetypes):
        # the store type names that were requested but unsupported
        self.storetypes = storetypes
@@ -1,106 +1,105 b''
1 1 # Copyright 2010-2011 Fog Creek Software
2 2 # Copyright 2010-2011 Unity Technologies
3 3 #
4 4 # This software may be used and distributed according to the terms of the
5 5 # GNU General Public License version 2 or any later version.
6 6
7 7 '''Remote largefile store; the base class for servestore'''
8 8
9 9 import urllib2
10 import HTTPError
11 10
12 11 from mercurial import util
13 12 from mercurial.i18n import _
14 13
15 14 import lfutil
16 15 import basestore
17 16
18 17 class remotestore(basestore.basestore):
19 18 """A largefile store accessed over a network"""
20 19 def __init__(self, ui, repo, url):
21 20 super(remotestore, self).__init__(ui, repo, url)
22 21
23 22 def put(self, source, hash):
24 23 if self._verify(hash):
25 24 return
26 25 if self.sendfile(source, hash):
27 26 raise util.Abort(
28 27 _('remotestore: could not put %s to remote store %s')
29 28 % (source, self.url))
30 29 self.ui.debug(
31 30 _('remotestore: put %s to remote store %s') % (source, self.url))
32 31
33 32 def exists(self, hash):
34 33 return self._verify(hash)
35 34
36 35 def sendfile(self, filename, hash):
37 36 self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
38 37 fd = None
39 38 try:
40 39 try:
41 40 fd = lfutil.httpsendfile(self.ui, filename)
42 41 except IOError, e:
43 42 raise util.Abort(
44 43 _('remotestore: could not open file %s: %s')
45 44 % (filename, str(e)))
46 45 return self._put(hash, fd)
47 46 finally:
48 47 if fd:
49 48 fd.close()
50 49
51 50 def _getfile(self, tmpfile, filename, hash):
52 51 # quit if the largefile isn't there
53 52 stat = self._stat(hash)
54 53 if stat:
55 54 raise util.Abort(_('remotestore: largefile %s is %s') %
56 55 (hash, stat == 1 and 'invalid' or 'missing'))
57 56
58 57 try:
59 58 length, infile = self._get(hash)
60 except HTTPError, e:
59 except urllib2.HTTPError, e:
61 60 # 401s get converted to util.Aborts; everything else is fine being
62 61 # turned into a StoreError
63 62 raise basestore.StoreError(filename, hash, self.url, str(e))
64 63 except urllib2.URLError, e:
65 64 # This usually indicates a connection problem, so don't
66 65 # keep trying with the other files... they will probably
67 66 # all fail too.
68 67 raise util.Abort('%s: %s' % (self.url, str(e.reason)))
69 68 except IOError, e:
70 69 raise basestore.StoreError(filename, hash, self.url, str(e))
71 70
72 71 # Mercurial does not close its SSH connections after writing a stream
73 72 if length is not None:
74 73 infile = lfutil.limitreader(infile, length)
75 74 return lfutil.copyandhash(lfutil.blockstream(infile), tmpfile)
76 75
77 76 def _verify(self, hash):
78 77 return not self._stat(hash)
79 78
80 79 def _verifyfile(self, cctx, cset, contents, standin, verified):
81 80 filename = lfutil.splitstandin(standin)
82 81 if not filename:
83 82 return False
84 83 fctx = cctx[standin]
85 84 key = (filename, fctx.filenode())
86 85 if key in verified:
87 86 return False
88 87
89 88 verified.add(key)
90 89
91 90 stat = self._stat(hash)
92 91 if not stat:
93 92 return False
94 93 elif stat == 1:
95 94 self.ui.warn(
96 95 _('changeset %s: %s: contents differ\n')
97 96 % (cset, filename))
98 97 return True # failed
99 98 elif stat == 2:
100 99 self.ui.warn(
101 100 _('changeset %s: %s missing\n')
102 101 % (cset, filename))
103 102 return True # failed
104 103 else:
105 104 raise util.Abort(_('check failed, unexpected response'
106 105 'statlfile: %d') % stat)
@@ -1,138 +1,138 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles extension: uisetup'''
10 10
11 11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httprepo, localrepo, sshrepo, sshserver, wireproto
12 httprepo, localrepo, sshrepo, sshserver, util, wireproto
13 13 from mercurial.i18n import _
14 14 from mercurial.hgweb import hgweb_mod, protocol
15 15
16 16 import overrides
17 17 import proto
18 18
def uisetup(ui):
    '''Wire the largefiles extension into Mercurial at load time: wrap
    the relevant commands and library functions, register the
    largefiles wire-protocol commands, and patch the supported
    repository classes.'''
    # Disable auto-status for some commands which assume that all
    # files in the result are under Mercurial's control

    entry = extensions.wrapcommand(commands.table, 'add',
                                   overrides.override_add)
    addopt = [('', 'large', None, _('add as largefile')),
              ('', 'lfsize', '', _('add all files above this size (in megabytes)'
                                   'as largefiles (default: 10)'))]
    entry[1].extend(addopt)

    entry = extensions.wrapcommand(commands.table, 'addremove',
                                   overrides.override_addremove)
    entry = extensions.wrapcommand(commands.table, 'remove',
                                   overrides.override_remove)
    entry = extensions.wrapcommand(commands.table, 'forget',
                                   overrides.override_forget)
    entry = extensions.wrapcommand(commands.table, 'status',
                                   overrides.override_status)
    entry = extensions.wrapcommand(commands.table, 'log',
                                   overrides.override_log)
    entry = extensions.wrapcommand(commands.table, 'rollback',
                                   overrides.override_rollback)
    entry = extensions.wrapcommand(commands.table, 'verify',
                                   overrides.override_verify)

    verifyopt = [('', 'large', None, _('verify largefiles')),
                 ('', 'lfa', None,
                  _('verify all revisions of largefiles not just current')),
                 ('', 'lfc', None,
                  _('verify largefile contents not just existence'))]
    entry[1].extend(verifyopt)

    entry = extensions.wrapcommand(commands.table, 'outgoing',
                                   overrides.override_outgoing)
    outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
    entry[1].extend(outgoingopt)
    entry = extensions.wrapcommand(commands.table, 'summary',
                                   overrides.override_summary)
    summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
    entry[1].extend(summaryopt)

    entry = extensions.wrapcommand(commands.table, 'update',
                                   overrides.override_update)
    entry = extensions.wrapcommand(commands.table, 'pull',
                                   overrides.override_pull)
    entry = extensions.wrapfunction(filemerge, 'filemerge',
                                    overrides.override_filemerge)
    entry = extensions.wrapfunction(cmdutil, 'copy',
                                    overrides.override_copy)

    # Backout calls revert so we need to override both the command and the
    # function
    entry = extensions.wrapcommand(commands.table, 'revert',
                                   overrides.override_revert)
    entry = extensions.wrapfunction(commands, 'revert',
                                    overrides.override_revert)

    # clone uses hg._update instead of hg.update even though they are the
    # same function... so wrap both of them)
    extensions.wrapfunction(hg, 'update', overrides.hg_update)
    extensions.wrapfunction(hg, '_update', overrides.hg_update)
    extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
    extensions.wrapfunction(hg, 'merge', overrides.hg_merge)

    extensions.wrapfunction(archival, 'archive', overrides.override_archive)
    # newer Mercurials expose bailifchanged; fall back to the old
    # bail_if_changed spelling otherwise
    if util.safehasattr(cmdutil, 'bailifchanged'):
        extensions.wrapfunction(cmdutil, 'bailifchanged',
                                overrides.override_bailifchanged)
    else:
        extensions.wrapfunction(cmdutil, 'bail_if_changed',
                                overrides.override_bailifchanged)

    # create the new wireproto commands ...
    wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
    wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
    wireproto.commands['statlfile'] = (proto.statlfile, 'sha')

    # ... and wrap some existing ones
    wireproto.commands['capabilities'] = (proto.capabilities, '')
    wireproto.commands['heads'] = (proto.heads, '')
    wireproto.commands['lheads'] = (wireproto.heads, '')

    # make putlfile behave the same as push and {get,stat}lfile behave the same
    # as pull w.r.t. permissions checks
    hgweb_mod.perms['putlfile'] = 'push'
    hgweb_mod.perms['getlfile'] = 'pull'
    hgweb_mod.perms['statlfile'] = 'pull'

    # the hello wireproto command uses wireproto.capabilities, so it won't see
    # our largefiles capability unless we replace the actual function as well.
    proto.capabilities_orig = wireproto.capabilities
    wireproto.capabilities = proto.capabilities

    # these let us reject non-lfiles clients and make them display our error
    # messages
    protocol.webproto.refuseclient = proto.webproto_refuseclient
    sshserver.sshserver.refuseclient = proto.sshproto_refuseclient

    # can't do this in reposetup because it needs to have happened before
    # wirerepo.__init__ is called
    proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
    proto.http_oldcallstream = httprepo.httprepository._callstream
    sshrepo.sshrepository._callstream = proto.sshrepo_callstream
    httprepo.httprepository._callstream = proto.httprepo_callstream

    # don't die on seeing a repo with the largefiles requirement
    localrepo.localrepository.supported |= set(['largefiles'])

    # override some extensions' stuff as well
    for name, module in extensions.extensions():
        if name == 'fetch':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
                overrides.override_fetch)
        if name == 'purge':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
                overrides.override_purge)
        if name == 'rebase':
            extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
                overrides.override_rebase)
General Comments 0
You need to be logged in to leave comments. Login now