remotefilelog: add some docstring...
Augie Fackler
r40547:b35a096b default
@@ -1,1106 +1,1117 b''
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 GUARANTEES. This means that repositories created with this extension may
11 only be usable with the exact version of this extension/Mercurial that was
12 used. The extension attempts to enforce this in order to prevent repository
13 corruption.
14
15 remotefilelog works by fetching file contents lazily and storing them
16 in a cache on the client rather than in revlogs. This allows enormous
17 histories to be transferred only partially, making them easier to
18 operate on.
19
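A shallow clone is created by passing the ``--shallow`` flag that this
extension adds to :hg:`clone`; for example (the server URL here is
illustrative)::

  $ hg clone --shallow ssh://hg.example.com/bigrepo
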
9 20 Configs:
10 21
11 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
12 23 ``packs.maxpacksize`` specifies the maximum pack file size
13 24 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
14 25 shared cache (trees only for now)
15 26 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
16 27 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
17 28 update, and on other commands that use them. Different from pullprefetch.
18 29 ``remotefilelog.gcrepack`` does garbage collection during repack when True
19 30 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
20 31 it is garbage collected
21 32 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
22 33 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
23 34 days after which it is no longer prefetched.
24 35 ``remotefilelog.prefetchdelay`` specifies delay between background
25 36 prefetches in seconds after operations that change the working copy parent
26 37 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
27 38 pack files required to be considered part of a generation. In particular,
28 39 minimum number of pack files > gencountlimit.
29 40 ``remotefilelog.data.generations`` list for specifying the lower bound of
30 41 each generation of the data pack files. For example, list ['100MB','1MB']
31 42 or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
32 43 1MB, 100MB) and [100MB, infinity).
33 44 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
34 45 include in an incremental data repack.
35 46 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
36 47 it to be considered for an incremental data repack.
37 48 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
38 49 to include in an incremental data repack.
39 50 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
40 51 history pack files required to be considered part of a generation. In
41 52 particular, minimum number of pack files > gencountlimit.
42 53 ``remotefilelog.history.generations`` list for specifying the lower bound of
43 54 each generation of the history pack files. For example, list [
44 55 '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
45 56 0, 1MB), [1MB, 100MB) and [100MB, infinity).
46 57 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
47 58 include in an incremental history repack.
48 59 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
49 60 for it to be considered for an incremental history repack.
50 61 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
51 62 files to include in an incremental history repack.
52 63 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
53 64 background
54 65 ``remotefilelog.cachepath`` path to cache
55 66 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
56 67 group
57 68 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
58 69 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
59 70 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
60 71 ``remotefilelog.includepattern`` pattern of files to include in pulls
61 72 ``remotefilelog.fetchwarning`` message to print when too many
62 73 single-file fetches occur
63 74 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
64 75 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
65 76 files, otherwise use optimistic fetching
66 77 ``remotefilelog.pullprefetch`` revset for selecting files that should be
67 78 eagerly downloaded rather than lazily
68 79 ``remotefilelog.reponame`` name of the repo. If set, used to partition
69 80 data from other repos in a shared store.
70 81 ``remotefilelog.server`` if true, enable server-side functionality
71 82 ``remotefilelog.servercachepath`` path for caching blobs on the server
72 83 ``remotefilelog.serverexpiration`` number of days to keep cached server
73 84 blobs
74 85 ``remotefilelog.validatecache`` if set, check cache entries for corruption
75 86 before returning blobs
76 87 ``remotefilelog.validatecachelog`` if set, check cache entries for
77 88 corruption before returning metadata
78 89
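A minimal client configuration might look like the following (the cache
path and repo name are illustrative values, not defaults)::

  [extensions]
  remotefilelog =

  [remotefilelog]
  cachepath = /var/cache/hg/remotefilelog
  reponame = bigrepo

On the server side, setting ``remotefilelog.server`` to True enables the
serving functionality.
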
79 90 """
80 91 from __future__ import absolute_import
81 92
82 93 import os
83 94 import time
84 95 import traceback
85 96
86 97 from mercurial.node import hex
87 98 from mercurial.i18n import _
88 99 from mercurial import (
89 100 changegroup,
90 101 changelog,
91 102 cmdutil,
92 103 commands,
93 104 configitems,
94 105 context,
95 106 copies,
96 107 debugcommands as hgdebugcommands,
97 108 dispatch,
98 109 error,
99 110 exchange,
100 111 extensions,
101 112 hg,
102 113 localrepo,
103 114 match,
104 115 merge,
105 116 node as nodemod,
106 117 patch,
107 118 registrar,
108 119 repair,
109 120 repoview,
110 121 revset,
111 122 scmutil,
112 123 smartset,
113 124 templatekw,
114 125 util,
115 126 )
116 127 from . import (
117 128 constants,
118 129 debugcommands,
119 130 fileserverclient,
120 131 remotefilectx,
121 132 remotefilelog,
122 133 remotefilelogserver,
123 134 repack as repackmod,
124 135 shallowbundle,
125 136 shallowrepo,
126 137 shallowstore,
127 138 shallowutil,
128 139 shallowverifier,
129 140 )
130 141
131 142 # ensures debug commands are registered
132 143 hgdebugcommands.command
133 144
134 145 try:
135 146 from mercurial import streamclone
136 147 streamclone._walkstreamfiles
137 148 hasstreamclone = True
138 149 except Exception:
139 150 hasstreamclone = False
140 151
141 152 cmdtable = {}
142 153 command = registrar.command(cmdtable)
143 154
144 155 configtable = {}
145 156 configitem = registrar.configitem(configtable)
146 157
147 158 configitem('remotefilelog', 'debug', default=False)
148 159
149 160 configitem('remotefilelog', 'reponame', default='')
150 161 configitem('remotefilelog', 'cachepath', default=None)
151 162 configitem('remotefilelog', 'cachegroup', default=None)
152 163 configitem('remotefilelog', 'cacheprocess', default=None)
153 164 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
154 165 configitem("remotefilelog", "cachelimit", default="1000 GB")
155 166
156 167 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
157 168 alias=[('remotefilelog', 'fallbackrepo')])
158 169
159 170 configitem('remotefilelog', 'validatecachelog', default=None)
160 171 configitem('remotefilelog', 'validatecache', default='on')
161 172 configitem('remotefilelog', 'server', default=None)
162 173 configitem('remotefilelog', 'servercachepath', default=None)
163 174 configitem("remotefilelog", "serverexpiration", default=30)
164 175 configitem('remotefilelog', 'backgroundrepack', default=False)
165 176 configitem('remotefilelog', 'bgprefetchrevs', default=None)
166 177 configitem('remotefilelog', 'pullprefetch', default=None)
167 178 configitem('remotefilelog', 'backgroundprefetch', default=False)
168 179 configitem('remotefilelog', 'prefetchdelay', default=120)
169 180 configitem('remotefilelog', 'prefetchdays', default=14)
170 181
171 182 configitem('remotefilelog', 'getfilesstep', default=10000)
172 183 configitem('remotefilelog', 'getfilestype', default='optimistic')
173 184 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
174 185 configitem('remotefilelog', 'fetchwarning', default='')
175 186
176 187 configitem('remotefilelog', 'includepattern', default=None)
177 188 configitem('remotefilelog', 'excludepattern', default=None)
178 189
179 190 configitem('remotefilelog', 'gcrepack', default=False)
180 191 configitem('remotefilelog', 'repackonhggc', default=False)
181 192 configitem('remotefilelog', 'datapackversion', default=0)
182 193 configitem('repack', 'chainorphansbysize', default=True)
183 194
184 195 configitem('packs', 'maxpacksize', default=0)
185 196 configitem('packs', 'maxchainlen', default=1000)
186 197
187 198 configitem('remotefilelog', 'historypackv1', default=False)
188 199 # default TTL limit is 30 days
189 200 _defaultlimit = 60 * 60 * 24 * 30
190 201 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
191 202
192 203 configitem('remotefilelog', 'data.gencountlimit', default=2)
193 204 configitem('remotefilelog', 'data.generations',
194 205 default=['1GB', '100MB', '1MB'])
195 206 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
196 207 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
197 208 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
198 209
199 210 configitem('remotefilelog', 'history.gencountlimit', default=2)
200 211 configitem('remotefilelog', 'history.generations', default=['100MB'])
201 212 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
202 213 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
203 214 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
204 215
205 216 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
206 217 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
207 218 # be specifying the version(s) of Mercurial they are tested with, or
208 219 # leave the attribute unspecified.
209 220 testedwith = 'ships-with-hg-core'
210 221
211 222 repoclass = localrepo.localrepository
212 223 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
213 224
214 225 def uisetup(ui):
215 226 """Wraps user facing Mercurial commands to swap them out with shallow
216 227 versions.
217 228 """
218 229 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
219 230
220 231 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
221 232 entry[1].append(('', 'shallow', None,
222 233 _("create a shallow clone which uses remote file "
223 234 "history")))
224 235
225 236 extensions.wrapcommand(commands.table, 'debugindex',
226 237 debugcommands.debugindex)
227 238 extensions.wrapcommand(commands.table, 'debugindexdot',
228 239 debugcommands.debugindexdot)
229 240 extensions.wrapcommand(commands.table, 'log', log)
230 241 extensions.wrapcommand(commands.table, 'pull', pull)
231 242
232 243 # Prevent 'hg manifest --all'
233 244 def _manifest(orig, ui, repo, *args, **opts):
234 245 if (constants.SHALLOWREPO_REQUIREMENT in repo.requirements
235 246 and opts.get('all')):
236 247 raise error.Abort(_("--all is not supported in a shallow repo"))
237 248
238 249 return orig(ui, repo, *args, **opts)
239 250 extensions.wrapcommand(commands.table, "manifest", _manifest)
240 251
241 252 # Wrap remotefilelog with lfs code
242 253 def _lfsloaded(loaded=False):
243 254 lfsmod = None
244 255 try:
245 256 lfsmod = extensions.find('lfs')
246 257 except KeyError:
247 258 pass
248 259 if lfsmod:
249 260 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
250 261 fileserverclient._lfsmod = lfsmod
251 262 extensions.afterloaded('lfs', _lfsloaded)
252 263
253 264 # debugdata needs remotefilelog.len to work
254 265 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
255 266
256 267 def cloneshallow(orig, ui, repo, *args, **opts):
257 268 if opts.get('shallow'):
258 269 repos = []
259 270 def pull_shallow(orig, self, *args, **kwargs):
260 271 if constants.SHALLOWREPO_REQUIREMENT not in self.requirements:
261 272 repos.append(self.unfiltered())
262 273 # set up the client hooks so the post-clone update works
263 274 setupclient(self.ui, self.unfiltered())
264 275
265 276 # setupclient fixed the class on the repo itself
266 277 # but we also need to fix it on the repoview
267 278 if isinstance(self, repoview.repoview):
268 279 self.__class__.__bases__ = (self.__class__.__bases__[0],
269 280 self.unfiltered().__class__)
270 281 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
271 282 self._writerequirements()
272 283
273 284 # Since setupclient hadn't been called, exchange.pull was not
274 285 # wrapped. So we need to manually invoke our version of it.
275 286 return exchangepull(orig, self, *args, **kwargs)
276 287 else:
277 288 return orig(self, *args, **kwargs)
278 289 extensions.wrapfunction(exchange, 'pull', pull_shallow)
279 290
280 291 # Wrap the stream logic to add requirements and to pass include/exclude
281 292 # patterns around.
282 293 def setup_streamout(repo, remote):
283 294 # Replace remote.stream_out with a version that sends file
284 295 # patterns.
285 296 def stream_out_shallow(orig):
286 297 caps = remote.capabilities()
287 298 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
288 299 opts = {}
289 300 if repo.includepattern:
290 301 opts['includepattern'] = '\0'.join(repo.includepattern)
291 302 if repo.excludepattern:
292 303 opts['excludepattern'] = '\0'.join(repo.excludepattern)
293 304 return remote._callstream('stream_out_shallow', **opts)
294 305 else:
295 306 return orig()
296 307 extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
297 308 if hasstreamclone:
298 309 def stream_wrap(orig, op):
299 310 setup_streamout(op.repo, op.remote)
300 311 return orig(op)
301 312 extensions.wrapfunction(
302 313 streamclone, 'maybeperformlegacystreamclone', stream_wrap)
303 314
304 315 def canperformstreamclone(orig, pullop, bundle2=False):
305 316 # remotefilelog is currently incompatible with the
306 317 # bundle2 flavor of streamclones, so force us to use
307 318 # v1 instead.
308 319 if 'v2' in pullop.remotebundle2caps.get('stream', []):
309 320 pullop.remotebundle2caps['stream'] = [
310 321 c for c in pullop.remotebundle2caps['stream']
311 322 if c != 'v2']
312 323 if bundle2:
313 324 return False, None
314 325 supported, requirements = orig(pullop, bundle2=bundle2)
315 326 if requirements is not None:
316 327 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
317 328 return supported, requirements
318 329 extensions.wrapfunction(
319 330 streamclone, 'canperformstreamclone', canperformstreamclone)
320 331 else:
321 332 def stream_in_shallow(orig, repo, remote, requirements):
322 333 setup_streamout(repo, remote)
323 334 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
324 335 return orig(repo, remote, requirements)
325 336 extensions.wrapfunction(
326 337 localrepo.localrepository, 'stream_in', stream_in_shallow)
327 338
328 339 try:
329 340 orig(ui, repo, *args, **opts)
330 341 finally:
331 342 if opts.get('shallow'):
332 343 for r in repos:
333 344 if util.safehasattr(r, 'fileservice'):
334 345 r.fileservice.close()
335 346
336 347 def debugdatashallow(orig, *args, **kwds):
337 348 oldlen = remotefilelog.remotefilelog.__len__
338 349 try:
339 350 remotefilelog.remotefilelog.__len__ = lambda x: 1
340 351 return orig(*args, **kwds)
341 352 finally:
342 353 remotefilelog.remotefilelog.__len__ = oldlen
343 354
344 355 def reposetup(ui, repo):
345 356 if not isinstance(repo, localrepo.localrepository):
346 357 return
347 358
348 359 # put here intentionally because it doesn't work in uisetup
349 360 ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
350 361 ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
351 362
352 363 isserverenabled = ui.configbool('remotefilelog', 'server')
353 364 isshallowclient = constants.SHALLOWREPO_REQUIREMENT in repo.requirements
354 365
355 366 if isserverenabled and isshallowclient:
356 367 raise RuntimeError("Cannot be both a server and shallow client.")
357 368
358 369 if isshallowclient:
359 370 setupclient(ui, repo)
360 371
361 372 if isserverenabled:
362 373 remotefilelogserver.setupserver(ui, repo)
363 374
364 375 def setupclient(ui, repo):
365 376 if not isinstance(repo, localrepo.localrepository):
366 377 return
367 378
368 379 # Even clients get the server setup since they need to have the
369 380 # wireprotocol endpoints registered.
370 381 remotefilelogserver.onetimesetup(ui)
371 382 onetimeclientsetup(ui)
372 383
373 384 shallowrepo.wraprepo(repo)
374 385 repo.store = shallowstore.wrapstore(repo.store)
375 386
376 387 clientonetime = False
377 388 def onetimeclientsetup(ui):
378 389 global clientonetime
379 390 if clientonetime:
380 391 return
381 392 clientonetime = True
382 393
383 394 changegroup.cgpacker = shallowbundle.shallowcg1packer
384 395
385 396 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
386 397 shallowbundle.addchangegroupfiles)
387 398 extensions.wrapfunction(
388 399 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
389 400
390 401 def storewrapper(orig, requirements, path, vfstype):
391 402 s = orig(requirements, path, vfstype)
392 403 if constants.SHALLOWREPO_REQUIREMENT in requirements:
393 404 s = shallowstore.wrapstore(s)
394 405
395 406 return s
396 407 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
397 408
398 409 extensions.wrapfunction(exchange, 'pull', exchangepull)
399 410
400 411 # prefetch files before update
401 412 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
402 413 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
403 414 manifest = mctx.manifest()
404 415 files = []
405 416 for f, args, msg in actions['g']:
406 417 files.append((f, hex(manifest[f])))
407 418 # batch fetch the needed files from the server
408 419 repo.fileservice.prefetch(files)
409 420 return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
410 421 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
411 422
412 423 # Prefetch merge checkunknownfiles
413 424 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
414 425 *args, **kwargs):
415 426 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
416 427 files = []
417 428 sparsematch = repo.maybesparsematch(mctx.rev())
418 429 for f, (m, actionargs, msg) in actions.iteritems():
419 430 if sparsematch and not sparsematch(f):
420 431 continue
421 432 if m in ('c', 'dc', 'cm'):
422 433 files.append((f, hex(mctx.filenode(f))))
423 434 elif m == 'dg':
424 435 f2 = actionargs[0]
425 436 files.append((f2, hex(mctx.filenode(f2))))
426 437 # batch fetch the needed files from the server
427 438 repo.fileservice.prefetch(files)
428 439 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
429 440 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
430 441
431 442 # Prefetch files before status attempts to look at their size and contents
432 443 def checklookup(orig, self, files):
433 444 repo = self._repo
434 445 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
435 446 prefetchfiles = []
436 447 for parent in self._parents:
437 448 for f in files:
438 449 if f in parent:
439 450 prefetchfiles.append((f, hex(parent.filenode(f))))
440 451 # batch fetch the needed files from the server
441 452 repo.fileservice.prefetch(prefetchfiles)
442 453 return orig(self, files)
443 454 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
444 455
445 456 # Prefetch files before the added/removed comparison used for renames
446 457 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
447 458 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
448 459 files = []
449 460 parentctx = repo['.']
450 461 for f in removed:
451 462 files.append((f, hex(parentctx.filenode(f))))
452 463 # batch fetch the needed files from the server
453 464 repo.fileservice.prefetch(files)
454 465 return orig(repo, matcher, added, removed, *args, **kwargs)
455 466 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
456 467
457 468 # prefetch files before mergecopies check
458 469 def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
459 470 u1, u2 = orig(repo, c1, c2, *args, **kwargs)
460 471 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
461 472 m1 = c1.manifest()
462 473 m2 = c2.manifest()
463 474 files = []
464 475
465 476 sparsematch1 = repo.maybesparsematch(c1.rev())
466 477 if sparsematch1:
467 478 sparseu1 = []
468 479 for f in u1:
469 480 if sparsematch1(f):
470 481 files.append((f, hex(m1[f])))
471 482 sparseu1.append(f)
472 483 u1 = sparseu1
473 484
474 485 sparsematch2 = repo.maybesparsematch(c2.rev())
475 486 if sparsematch2:
476 487 sparseu2 = []
477 488 for f in u2:
478 489 if sparsematch2(f):
479 490 files.append((f, hex(m2[f])))
480 491 sparseu2.append(f)
481 492 u2 = sparseu2
482 493
483 494 # batch fetch the needed files from the server
484 495 repo.fileservice.prefetch(files)
485 496 return u1, u2
486 497 extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap)
487 498
488 499 # prefetch files before pathcopies check
489 500 def computeforwardmissing(orig, a, b, match=None):
490 501 missing = list(orig(a, b, match=match))
491 502 repo = a._repo
492 503 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
493 504 mb = b.manifest()
494 505
495 506 files = []
496 507 sparsematch = repo.maybesparsematch(b.rev())
497 508 if sparsematch:
498 509 sparsemissing = []
499 510 for f in missing:
500 511 if sparsematch(f):
501 512 files.append((f, hex(mb[f])))
502 513 sparsemissing.append(f)
503 514 missing = sparsemissing
504 515
505 516 # batch fetch the needed files from the server
506 517 repo.fileservice.prefetch(files)
507 518 return missing
508 519 extensions.wrapfunction(copies, '_computeforwardmissing',
509 520 computeforwardmissing)
510 521
511 522 # close cache miss server connection after the command has finished
512 523 def runcommand(orig, lui, repo, *args, **kwargs):
513 524 try:
514 525 return orig(lui, repo, *args, **kwargs)
515 526 finally:
516 527 # repo can be None when running in chg:
517 528 # - at startup, reposetup was called because serve is not norepo
518 529 # - a norepo command like "help" is called
519 530 if repo and constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
520 531 repo.fileservice.close()
521 532 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
522 533
523 534 # disappointing hacks below
524 535 templatekw.getrenamedfn = getrenamedfn
525 536 extensions.wrapfunction(revset, 'filelog', filelogrevset)
526 537 revset.symbols['filelog'] = revset.filelog
527 538 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
528 539
529 540 # prevent strip from stripping remotefilelogs
530 541 def _collectbrokencsets(orig, repo, files, striprev):
531 542 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
532 543 files = [f for f in files if not repo.shallowmatch(f)]
533 544 return orig(repo, files, striprev)
534 545 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
535 546
536 547 # Don't commit filelogs until we know the commit hash, since the hash
537 548 # is present in the filelog blob.
538 549 # This violates Mercurial's filelog->manifest->changelog write order,
539 550 # but is generally fine for client repos.
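# (A sketch of the flow: the addrawrevision() wrapper below buffers file
# revisions keyed by their pending integer linkrev, and changelogadd()
# flushes them once the changelog entry, and thus the commit hash, exists.)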
540 551 pendingfilecommits = []
541 552 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
542 553 flags, cachedelta=None, _metatuple=None):
543 554 if isinstance(link, int):
544 555 pendingfilecommits.append(
545 556 (self, rawtext, transaction, link, p1, p2, node, flags,
546 557 cachedelta, _metatuple))
547 558 return node
548 559 else:
549 560 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
550 561 cachedelta, _metatuple=_metatuple)
551 562 extensions.wrapfunction(
552 563 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
553 564
554 565 def changelogadd(orig, self, *args):
555 566 oldlen = len(self)
556 567 node = orig(self, *args)
557 568 newlen = len(self)
558 569 if oldlen != newlen:
559 570 for oldargs in pendingfilecommits:
560 571 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
561 572 linknode = self.node(link)
562 573 if linknode == node:
563 574 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
564 575 else:
565 576 raise error.ProgrammingError(
566 577 'pending multiple integer revisions are not supported')
567 578 else:
568 579 # "link" is actually wrong here (it is set to len(changelog))
569 580 # if changelog remains unchanged, skip writing file revisions
570 581 # but still do a sanity check about pending multiple revisions
571 582 if len(set(x[3] for x in pendingfilecommits)) > 1:
572 583 raise error.ProgrammingError(
573 584 'pending multiple integer revisions are not supported')
574 585 del pendingfilecommits[:]
575 586 return node
576 587 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
577 588
578 589 # changectx wrappers
579 590 def filectx(orig, self, path, fileid=None, filelog=None):
580 591 if fileid is None:
581 592 fileid = self.filenode(path)
582 593 if (constants.SHALLOWREPO_REQUIREMENT in self._repo.requirements and
583 594 self._repo.shallowmatch(path)):
584 595 return remotefilectx.remotefilectx(self._repo, path,
585 596 fileid=fileid, changectx=self, filelog=filelog)
586 597 return orig(self, path, fileid=fileid, filelog=filelog)
587 598 extensions.wrapfunction(context.changectx, 'filectx', filectx)
588 599
589 600 def workingfilectx(orig, self, path, filelog=None):
590 601 if (constants.SHALLOWREPO_REQUIREMENT in self._repo.requirements and
591 602 self._repo.shallowmatch(path)):
592 603 return remotefilectx.remoteworkingfilectx(self._repo,
593 604 path, workingctx=self, filelog=filelog)
594 605 return orig(self, path, filelog=filelog)
595 606 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
596 607
597 608 # prefetch required revisions before a diff
598 609 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
599 610 copy, getfilectx, *args, **kwargs):
600 611 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
601 612 prefetch = []
602 613 mf1 = ctx1.manifest()
603 614 for fname in modified + added + removed:
604 615 if fname in mf1:
605 616 fnode = getfilectx(fname, ctx1).filenode()
606 617 # fnode can be None if it's an edited working ctx file
607 618 if fnode:
608 619 prefetch.append((fname, hex(fnode)))
609 620 if fname not in removed:
610 621 fnode = getfilectx(fname, ctx2).filenode()
611 622 if fnode:
612 623 prefetch.append((fname, hex(fnode)))
613 624
614 625 repo.fileservice.prefetch(prefetch)
615 626
616 627 return orig(repo, revs, ctx1, ctx2, modified, added, removed,
617 628 copy, getfilectx, *args, **kwargs)
618 629 extensions.wrapfunction(patch, 'trydiff', trydiff)
619 630
620 631 # Prevent verify from processing files
621 632 # a stub for mercurial.hg.verify()
622 633 def _verify(orig, repo):
623 634 lock = repo.lock()
624 635 try:
625 636 return shallowverifier.shallowverifier(repo).verify()
626 637 finally:
627 638 lock.release()
628 639
629 640 extensions.wrapfunction(hg, 'verify', _verify)
630 641
631 642 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
632 643
633 644 def getrenamedfn(repo, endrev=None):
634 645 rcache = {}
635 646
636 647 def getrenamed(fn, rev):
637 648 '''looks up all renames for a file (up to endrev) the first
638 649 time the file is given. It indexes on the changerev and only
639 650 parses the manifest if linkrev != changerev.
640 651 Returns rename info for fn at changerev rev.'''
641 652 if rev in rcache.setdefault(fn, {}):
642 653 return rcache[fn][rev]
643 654
644 655 try:
645 656 fctx = repo[rev].filectx(fn)
646 657 for ancestor in fctx.ancestors():
647 658 if ancestor.path() == fn:
648 659 renamed = ancestor.renamed()
649 660 rcache[fn][ancestor.rev()] = renamed
650 661
651 662 return fctx.renamed()
652 663 except error.LookupError:
653 664 return None
654 665
655 666 return getrenamed
656 667
657 668 def walkfilerevs(orig, repo, match, follow, revs, fncache):
658 669 if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements:
659 670 return orig(repo, match, follow, revs, fncache)
660 671
661 672 # remotefilelogs can't be walked in rev order, so throw.
662 673 # The caller will see the exception and walk the commit tree instead.
663 674 if not follow:
664 675 raise cmdutil.FileWalkError("Cannot walk via filelog")
665 676
666 677 wanted = set()
667 678 minrev, maxrev = min(revs), max(revs)
668 679
669 680 pctx = repo['.']
670 681 for filename in match.files():
671 682 if filename not in pctx:
672 683 raise error.Abort(_('cannot follow file not in parent '
673 684 'revision: "%s"') % filename)
674 685 fctx = pctx[filename]
675 686
676 687 linkrev = fctx.linkrev()
677 688 if linkrev >= minrev and linkrev <= maxrev:
678 689 fncache.setdefault(linkrev, []).append(filename)
679 690 wanted.add(linkrev)
680 691
681 692 for ancestor in fctx.ancestors():
682 693 linkrev = ancestor.linkrev()
683 694 if linkrev >= minrev and linkrev <= maxrev:
684 695 fncache.setdefault(linkrev, []).append(ancestor.path())
685 696 wanted.add(linkrev)
686 697
687 698 return wanted
688 699
689 700 def filelogrevset(orig, repo, subset, x):
690 701 """``filelog(pattern)``
691 702 Changesets connected to the specified filelog.
692 703
693 704 For performance reasons, ``filelog()`` does not show every changeset
694 705 that affects the requested file(s). See :hg:`help log` for details. For
695 706 a slower, more accurate result, use ``file()``.
696 707 """
697 708
698 709 if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements:
699 710 return orig(repo, subset, x)
700 711
701 712 # i18n: "filelog" is a keyword
702 713 pat = revset.getstring(x, _("filelog requires a pattern"))
703 714 m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
704 715 ctx=repo[None])
705 716 s = set()
706 717
707 718 if not match.patkind(pat):
708 719 # slow
709 720 for r in subset:
710 721 ctx = repo[r]
711 722 cfiles = ctx.files()
712 723 for f in m.files():
713 724 if f in cfiles:
714 725 s.add(ctx.rev())
715 726 break
716 727 else:
717 728 # partial
718 729 files = (f for f in repo[None] if m(f))
719 730 for f in files:
720 731 fctx = repo[None].filectx(f)
721 732 s.add(fctx.linkrev())
722 733 for actx in fctx.ancestors():
723 734 s.add(actx.linkrev())
724 735
725 736 return smartset.baseset([r for r in subset if r in s])
726 737
727 738 @command('gc', [], _('hg gc [REPO...]'), norepo=True)
728 739 def gc(ui, *args, **opts):
729 740 '''garbage collect the client and server filelog caches
730 741 '''
731 742 cachepaths = set()
732 743
733 744 # get the system client cache
734 745 systemcache = shallowutil.getcachepath(ui, allowempty=True)
735 746 if systemcache:
736 747 cachepaths.add(systemcache)
737 748
738 749 # get repo client and server cache
739 750 repopaths = []
740 751 pwd = ui.environ.get('PWD')
741 752 if pwd:
742 753 repopaths.append(pwd)
743 754
744 755 repopaths.extend(args)
745 756 repos = []
746 757 for repopath in repopaths:
747 758 try:
748 759 repo = hg.peer(ui, {}, repopath)
749 760 repos.append(repo)
750 761
751 762 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
752 763 if repocache:
753 764 cachepaths.add(repocache)
754 765 except error.RepoError:
755 766 pass
756 767
757 768 # gc client cache
758 769 for cachepath in cachepaths:
759 770 gcclient(ui, cachepath)
760 771
761 772 # gc server cache
762 773 for repo in repos:
763 774 remotefilelogserver.gcserver(ui, repo._repo)
764 775
765 776 def gcclient(ui, cachepath):
766 777 # get list of repos that use this cache
767 778 repospath = os.path.join(cachepath, 'repos')
768 779 if not os.path.exists(repospath):
769 780 ui.warn(_("no known cache at %s\n") % cachepath)
770 781 return
771 782
772 783 reposfile = open(repospath, 'r')
773 784 repos = set([r[:-1] for r in reposfile.readlines()])
774 785 reposfile.close()
775 786
776 787 # build list of useful files
777 788 validrepos = []
778 789 keepkeys = set()
779 790
780 791 _analyzing = _("analyzing repositories")
781 792
782 793 sharedcache = None
783 794 filesrepacked = False
784 795
785 796 count = 0
786 797 for path in repos:
787 798 ui.progress(_analyzing, count, unit="repos", total=len(repos))
788 799 count += 1
789 800 try:
790 801 path = ui.expandpath(os.path.normpath(path))
791 802 except TypeError as e:
792 803 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
793 804 traceback.print_exc()
794 805 continue
795 806 try:
796 807 peer = hg.peer(ui, {}, path)
797 808 repo = peer._repo
798 809 except error.RepoError:
799 810 continue
800 811
801 812 validrepos.append(path)
802 813
803 814 # Protect against any repo or config changes that have happened since
804 815 # this repo was added to the repos file. We'd rather this loop succeed
805 816 # and too much be deleted, than the loop fail and nothing gets deleted.
806 817 if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements:
807 818 continue
808 819
809 820 if not util.safehasattr(repo, 'name'):
810 821 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
811 822 continue
812 823
813 824 # If garbage collection on repack and repack on hg gc are enabled
814 825 # then loose files are repacked and garbage collected.
815 826 # Otherwise regular garbage collection is performed.
816 827 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
817 828 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
818 829 if repackonhggc and gcrepack:
819 830 try:
820 831 repackmod.incrementalrepack(repo)
821 832 filesrepacked = True
822 833 continue
823 834 except (IOError, repackmod.RepackAlreadyRunning):
824 835 # If repack cannot be performed due to not enough disk space
825 836 # continue doing garbage collection of loose files w/o repack
826 837 pass
827 838
828 839 reponame = repo.name
829 840 if not sharedcache:
830 841 sharedcache = repo.sharedstore
831 842
832 843 # Compute a keepset which is not garbage collected
833 844 def keyfn(fname, fnode):
834 845 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
835 846 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
836 847
837 848 ui.progress(_analyzing, None)
838 849
839 850 # write list of valid repos back
840 851 oldumask = os.umask(0o002)
841 852 try:
842 853 reposfile = open(repospath, 'w')
843 854 reposfile.writelines([("%s\n" % r) for r in validrepos])
844 855 reposfile.close()
845 856 finally:
846 857 os.umask(oldumask)
847 858
848 859 # prune cache
849 860 if sharedcache is not None:
850 861 sharedcache.gc(keepkeys)
851 862 elif not filesrepacked:
852 863 ui.warn(_("warning: no valid repos in repofile\n"))
853 864
854 865 def log(orig, ui, repo, *pats, **opts):
855 866 if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements:
856 867 return orig(ui, repo, *pats, **opts)
857 868
858 869 follow = opts.get('follow')
859 870 revs = opts.get('rev')
860 871 if pats:
861 872 # Force slowpath for non-follow patterns and follows that start from
862 873 # non-working-copy-parent revs.
863 874 if not follow or revs:
864 875 # This forces the slowpath
865 876 opts['removed'] = True
866 877
867 878 # If this is a non-follow log without any revs specified, recommend that
868 879 # the user add -f to speed it up.
869 880 if not follow and not revs:
870 881 match, pats = scmutil.matchandpats(repo['.'], pats, opts)
871 882 isfile = not match.anypats()
872 883 if isfile:
873 884 for file in match.files():
874 885 if not os.path.isfile(repo.wjoin(file)):
875 886 isfile = False
876 887 break
877 888
878 889 if isfile:
879 890 ui.warn(_("warning: file log can be slow on large repos - " +
880 891 "use -f to speed it up\n"))
881 892
882 893 return orig(ui, repo, *pats, **opts)
883 894
884 895 def revdatelimit(ui, revset):
885 896 """Update revset so that only changesets no older than 'prefetchdays' days
886 897 are included. The default value is set to 14 days. If 'prefetchdays' is set
887 898 to zero or a negative value then the date restriction is not applied.
888 899 """
889 900 days = ui.configint('remotefilelog', 'prefetchdays')
890 901 if days > 0:
891 902 revset = '(%s) & date(-%s)' % (revset, days)
892 903 return revset
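# For example, revdatelimit(ui, 'draft()') returns "(draft()) & date(-14)"
# with the default 14-day prefetch window (an illustrative expansion).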
893 904
894 905 def readytofetch(repo):
895 906 """Check that enough time has passed since the last background prefetch.
896 907 This only relates to prefetches after operations that change the working
897 908 copy parent. Default delay between background prefetches is 2 minutes.
898 909 """
899 910 timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
900 911 fname = repo.vfs.join('lastprefetch')
901 912
902 913 ready = False
903 914 with open(fname, 'a'):
904 915 # the with construct above is used to avoid race conditions
905 916 modtime = os.path.getmtime(fname)
906 917 if (time.time() - modtime) > timeout:
907 918 os.utime(fname, None)
908 919 ready = True
909 920
910 921 return ready
911 922
912 923 def wcpprefetch(ui, repo, **kwargs):
913 924 """Prefetches in background revisions specified by bgprefetchrevs revset.
914 925 Does background repack if backgroundrepack flag is set in config.
915 926 """
916 927 shallow = constants.SHALLOWREPO_REQUIREMENT in repo.requirements
917 928 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
918 929 isready = readytofetch(repo)
919 930
920 931 if not (shallow and bgprefetchrevs and isready):
921 932 return
922 933
923 934 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
924 935 # update a revset with a date limit
925 936 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
926 937
927 938 def anon():
928 939 if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
929 940 return
930 941 repo.ranprefetch = True
931 942 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
932 943
933 944 repo._afterlock(anon)
934 945
935 946 def pull(orig, ui, repo, *pats, **opts):
936 947 result = orig(ui, repo, *pats, **opts)
937 948
938 949 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
939 950 # prefetch if it's configured
940 951 prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
941 952 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
942 953 bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
943 954
944 955 if prefetchrevset:
945 956 ui.status(_("prefetching file contents\n"))
946 957 revs = scmutil.revrange(repo, [prefetchrevset])
947 958 base = repo['.'].rev()
948 959 if bgprefetch:
949 960 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
950 961 else:
951 962 repo.prefetch(revs, base=base)
952 963 if bgrepack:
953 964 repackmod.backgroundrepack(repo, incremental=True)
954 965 elif bgrepack:
955 966 repackmod.backgroundrepack(repo, incremental=True)
956 967
957 968 return result
958 969
959 970 def exchangepull(orig, repo, remote, *args, **kwargs):
960 971 # Hook into the callstream/getbundle to insert bundle capabilities
961 972 # during a pull.
962 973 def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
963 974 **kwargs):
964 975 if not bundlecaps:
965 976 bundlecaps = set()
966 977 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
967 978 return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
968 979 **kwargs)
969 980
970 981 if util.safehasattr(remote, '_callstream'):
971 982 remote._localrepo = repo
972 983 elif util.safehasattr(remote, 'getbundle'):
973 984 extensions.wrapfunction(remote, 'getbundle', localgetbundle)
974 985
975 986 return orig(repo, remote, *args, **kwargs)
976 987
977 988 def _fileprefetchhook(repo, revs, match):
978 989 if constants.SHALLOWREPO_REQUIREMENT in repo.requirements:
979 990 allfiles = []
980 991 for rev in revs:
981 992 if rev == nodemod.wdirrev or rev is None:
982 993 continue
983 994 ctx = repo[rev]
984 995 mf = ctx.manifest()
985 996 sparsematch = repo.maybesparsematch(ctx.rev())
986 997 for path in ctx.walk(match):
987 998 if path.endswith('/'):
988 999 # Tree manifest that's being excluded as part of narrow
989 1000 continue
990 1001 if (not sparsematch or sparsematch(path)) and path in mf:
991 1002 allfiles.append((path, hex(mf[path])))
992 1003 repo.fileservice.prefetch(allfiles)
993 1004
994 1005 @command('debugremotefilelog', [
995 1006 ('d', 'decompress', None, _('decompress the filelog first')),
996 1007 ], _('hg debugremotefilelog <path>'), norepo=True)
997 1008 def debugremotefilelog(ui, path, **opts):
998 1009 return debugcommands.debugremotefilelog(ui, path, **opts)
999 1010
1000 1011 @command('verifyremotefilelog', [
1001 1012 ('d', 'decompress', None, _('decompress the filelogs first')),
1002 1013 ], _('hg verifyremotefilelog <directory>'), norepo=True)
1003 1014 def verifyremotefilelog(ui, path, **opts):
1004 1015 return debugcommands.verifyremotefilelog(ui, path, **opts)
1005 1016
1006 1017 @command('debugdatapack', [
1007 1018 ('', 'long', None, _('print the long hashes')),
1008 1019 ('', 'node', '', _('dump the contents of node'), 'NODE'),
1009 1020 ], _('hg debugdatapack <paths>'), norepo=True)
1010 1021 def debugdatapack(ui, *paths, **opts):
1011 1022 return debugcommands.debugdatapack(ui, *paths, **opts)
1012 1023
1013 1024 @command('debughistorypack', [
1014 1025 ], _('hg debughistorypack <path>'), norepo=True)
1015 1026 def debughistorypack(ui, path, **opts):
1016 1027 return debugcommands.debughistorypack(ui, path)
1017 1028
1018 1029 @command('debugkeepset', [
1019 1030 ], _('hg debugkeepset'))
1020 1031 def debugkeepset(ui, repo, **opts):
1021 1032 # The command is used to measure keepset computation time
1022 1033 def keyfn(fname, fnode):
1023 1034 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1024 1035 repackmod.keepset(repo, keyfn)
1025 1036 return
1026 1037
1027 1038 @command('debugwaitonrepack', [
1028 1039 ], _('hg debugwaitonrepack'))
1029 1040 def debugwaitonrepack(ui, repo, **opts):
1030 1041 return debugcommands.debugwaitonrepack(repo)
1031 1042
1032 1043 @command('debugwaitonprefetch', [
1033 1044 ], _('hg debugwaitonprefetch'))
1034 1045 def debugwaitonprefetch(ui, repo, **opts):
1035 1046 return debugcommands.debugwaitonprefetch(repo)
1036 1047
1037 1048 def resolveprefetchopts(ui, opts):
1038 1049 if not opts.get('rev'):
1039 1050 revset = ['.', 'draft()']
1040 1051
1041 1052 prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
1042 1053 if prefetchrevset:
1043 1054 revset.append('(%s)' % prefetchrevset)
1044 1055 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
1045 1056 if bgprefetchrevs:
1046 1057 revset.append('(%s)' % bgprefetchrevs)
1047 1058 revset = '+'.join(revset)
1048 1059
1049 1060 # update a revset with a date limit
1050 1061 revset = revdatelimit(ui, revset)
1051 1062
1052 1063 opts['rev'] = [revset]
1053 1064
1054 1065 if not opts.get('base'):
1055 1066 opts['base'] = None
1056 1067
1057 1068 return opts
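# With neither pullprefetch nor bgprefetchrevs configured, opts['rev'] above
# ends up as ['(.+draft()) & date(-14)'] under the default 14-day window
# (an illustrative expansion, not captured output).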
1058 1069
1059 1070 @command('prefetch', [
1060 1071 ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
1061 1072 ('', 'repack', False, _('run repack after prefetch')),
1062 1073 ('b', 'base', '', _("rev that is assumed to already be local")),
1063 1074 ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
1064 1075 def prefetch(ui, repo, *pats, **opts):
1065 1076 """prefetch file revisions from the server
1066 1077
1067 1078 Prefetches file revisions for the specified revs and stores them in the
1068 1079 local remotefilelog cache. If no rev is specified, the default rev is
1069 1080 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1070 1081 File names or patterns can be used to limit which files are downloaded.
1071 1082
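Example invocation (the revset shown is illustrative)::

  hg prefetch -r 'draft()' --repack
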
1072 1083 Return 0 on success.
1073 1084 """
1074 1085 if constants.SHALLOWREPO_REQUIREMENT not in repo.requirements:
1075 1086 raise error.Abort(_("repo is not shallow"))
1076 1087
1077 1088 opts = resolveprefetchopts(ui, opts)
1078 1089 revs = scmutil.revrange(repo, opts.get('rev'))
1079 1090 repo.prefetch(revs, opts.get('base'), pats, opts)
1080 1091
1081 1092 # Run repack in background
1082 1093 if opts.get('repack'):
1083 1094 repackmod.backgroundrepack(repo, incremental=True)
1084 1095
1085 1096 @command('repack', [
1086 1097 ('', 'background', None, _('run in a background process'), None),
1087 1098 ('', 'incremental', None, _('do an incremental repack'), None),
1088 1099 ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
1089 1100 ], _('hg repack [OPTIONS]'))
1090 1101 def repack_(ui, repo, *pats, **opts):
1091 1102 if opts.get('background'):
1092 1103 repackmod.backgroundrepack(repo, incremental=opts.get('incremental'),
1093 1104 packsonly=opts.get('packsonly', False))
1094 1105 return
1095 1106
1096 1107 options = {'packsonly': opts.get('packsonly')}
1097 1108
1098 1109 try:
1099 1110 if opts.get('incremental'):
1100 1111 repackmod.incrementalrepack(repo, options=options)
1101 1112 else:
1102 1113 repackmod.fullrepack(repo, options=options)
1103 1114 except repackmod.RepackAlreadyRunning as ex:
1104 1115 # Don't propagate the exception if the repack is already in
1105 1116 # progress, since we want the command to exit 0.
1106 1117 repo.ui.warn('%s\n' % ex)