remotefilelog: move most setup from onetimesetup() to uisetup()...
Martin von Zweigbergk
r42460:8a0e03f7 default
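This commit hinges on the ordering of Mercurial's extension hooks: ``uisetup(ui)`` runs exactly once per process when the extension is loaded, before any repository is read, while ``reposetup(ui, repo)`` runs for every repository object. Wrapping done in ``uisetup()`` therefore needs no run-once guard, which is what lets most of the body of ``onetimeclientsetup()`` move there. A minimal sketch of the two hooks, assuming a hypothetical extension (only the hook names, their signatures, and ``extensions.wrapcommand()`` mirror Mercurial's real API):

```python
# Sketch of Mercurial's two main extension setup hooks. The extension
# itself and the wrapper are hypothetical illustrations.
from mercurial import commands, extensions

def uisetup(ui):
    # Called exactly once per process, before any repo is loaded.
    # Process-wide command/function wrapping belongs here, so no
    # "has this already run?" global flag is needed.
    def wrappedstatus(orig, ui, repo, *args, **opts):
        ui.note('status called through the example wrapper\n')
        return orig(ui, repo, *args, **opts)
    extensions.wrapcommand(commands.table, 'status', wrappedstatus)

def reposetup(ui, repo):
    # Called once for every repository object that is created,
    # potentially many times per process; keep per-repo state here.
    if repo.local():
        ui.debug('example extension active in %s\n' % repo.root)
```

Note that not everything can move: the hooks set via ``ui.setconfig()`` in ``reposetup()`` below stay there because, as the in-code comment says, they do not take effect from ``uisetup()``.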
@@ -1,1124 +1,1111 b''
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 minimum number of pack files > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list specifying the lower bound of
52 52 each generation of the data pack files. For example, ['100MB', '1MB']
53 53 (in either order) will lead to three generations:
54 54 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, minimum number of pack files > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list specifying the lower bound of
70 70 each generation of the history pack files. For example, ['100MB', '1MB']
71 71 (in either order) will lead to three generations:
72 72 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning`` message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial import (
136 136 changegroup,
137 137 changelog,
138 138 cmdutil,
139 139 commands,
140 140 configitems,
141 141 context,
142 142 copies,
143 143 debugcommands as hgdebugcommands,
144 144 dispatch,
145 145 error,
146 146 exchange,
147 147 extensions,
148 148 hg,
149 149 localrepo,
150 150 match,
151 151 merge,
152 152 node as nodemod,
153 153 patch,
154 154 pycompat,
155 155 registrar,
156 156 repair,
157 157 repoview,
158 158 revset,
159 159 scmutil,
160 160 smartset,
161 161 streamclone,
162 162 util,
163 163 )
164 164 from . import (
165 165 constants,
166 166 debugcommands,
167 167 fileserverclient,
168 168 remotefilectx,
169 169 remotefilelog,
170 170 remotefilelogserver,
171 171 repack as repackmod,
172 172 shallowbundle,
173 173 shallowrepo,
174 174 shallowstore,
175 175 shallowutil,
176 176 shallowverifier,
177 177 )
178 178
179 179 # ensures debug commands are registered
180 180 hgdebugcommands.command
181 181
182 182 cmdtable = {}
183 183 command = registrar.command(cmdtable)
184 184
185 185 configtable = {}
186 186 configitem = registrar.configitem(configtable)
187 187
188 188 configitem('remotefilelog', 'debug', default=False)
189 189
190 190 configitem('remotefilelog', 'reponame', default='')
191 191 configitem('remotefilelog', 'cachepath', default=None)
192 192 configitem('remotefilelog', 'cachegroup', default=None)
193 193 configitem('remotefilelog', 'cacheprocess', default=None)
194 194 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
195 195 configitem("remotefilelog", "cachelimit", default="1000 GB")
196 196
197 197 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
198 198 alias=[('remotefilelog', 'fallbackrepo')])
199 199
200 200 configitem('remotefilelog', 'validatecachelog', default=None)
201 201 configitem('remotefilelog', 'validatecache', default='on')
202 202 configitem('remotefilelog', 'server', default=None)
203 203 configitem('remotefilelog', 'servercachepath', default=None)
204 204 configitem("remotefilelog", "serverexpiration", default=30)
205 205 configitem('remotefilelog', 'backgroundrepack', default=False)
206 206 configitem('remotefilelog', 'bgprefetchrevs', default=None)
207 207 configitem('remotefilelog', 'pullprefetch', default=None)
208 208 configitem('remotefilelog', 'backgroundprefetch', default=False)
209 209 configitem('remotefilelog', 'prefetchdelay', default=120)
210 210 configitem('remotefilelog', 'prefetchdays', default=14)
211 211
212 212 configitem('remotefilelog', 'getfilesstep', default=10000)
213 213 configitem('remotefilelog', 'getfilestype', default='optimistic')
214 214 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
215 215 configitem('remotefilelog', 'fetchwarning', default='')
216 216
217 217 configitem('remotefilelog', 'includepattern', default=None)
218 218 configitem('remotefilelog', 'excludepattern', default=None)
219 219
220 220 configitem('remotefilelog', 'gcrepack', default=False)
221 221 configitem('remotefilelog', 'repackonhggc', default=False)
222 222 configitem('repack', 'chainorphansbysize', default=True)
223 223
224 224 configitem('packs', 'maxpacksize', default=0)
225 225 configitem('packs', 'maxchainlen', default=1000)
226 226
227 227 # default TTL limit is 30 days
228 228 _defaultlimit = 60 * 60 * 24 * 30
229 229 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
230 230
231 231 configitem('remotefilelog', 'data.gencountlimit', default=2)
232 232 configitem('remotefilelog', 'data.generations',
233 233 default=['1GB', '100MB', '1MB'])
234 234 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
235 235 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
236 236 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
237 237
238 238 configitem('remotefilelog', 'history.gencountlimit', default=2)
239 239 configitem('remotefilelog', 'history.generations', default=['100MB'])
240 240 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
241 241 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
242 242 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
243 243
244 244 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
245 245 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
246 246 # be specifying the version(s) of Mercurial they are tested with, or
247 247 # leave the attribute unspecified.
248 248 testedwith = 'ships-with-hg-core'
249 249
250 250 repoclass = localrepo.localrepository
251 251 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
252 252
253 253 isenabled = shallowutil.isenabled
254 254
255 255 def uisetup(ui):
256 256 """Wraps user facing Mercurial commands to swap them out with shallow
257 257 versions.
258 258 """
259 259 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
260 260
261 261 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
262 262 entry[1].append(('', 'shallow', None,
263 263 _("create a shallow clone which uses remote file "
264 264 "history")))
265 265
266 266 extensions.wrapcommand(commands.table, 'debugindex',
267 267 debugcommands.debugindex)
268 268 extensions.wrapcommand(commands.table, 'debugindexdot',
269 269 debugcommands.debugindexdot)
270 270 extensions.wrapcommand(commands.table, 'log', log)
271 271 extensions.wrapcommand(commands.table, 'pull', pull)
272 272
273 273 # Prevent 'hg manifest --all'
274 274 def _manifest(orig, ui, repo, *args, **opts):
275 275 if (isenabled(repo) and opts.get(r'all')):
276 276 raise error.Abort(_("--all is not supported in a shallow repo"))
277 277
278 278 return orig(ui, repo, *args, **opts)
279 279 extensions.wrapcommand(commands.table, "manifest", _manifest)
280 280
281 281 # Wrap remotefilelog with lfs code
282 282 def _lfsloaded(loaded=False):
283 283 lfsmod = None
284 284 try:
285 285 lfsmod = extensions.find('lfs')
286 286 except KeyError:
287 287 pass
288 288 if lfsmod:
289 289 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
290 290 fileserverclient._lfsmod = lfsmod
291 291 extensions.afterloaded('lfs', _lfsloaded)
292 292
293 293 # debugdata needs remotefilelog.len to work
294 294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
295 295
296 changegroup.cgpacker = shallowbundle.shallowcg1packer
297
298 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
299 shallowbundle.addchangegroupfiles)
300 extensions.wrapfunction(
301 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
302 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
303 extensions.wrapfunction(exchange, 'pull', exchangepull)
304 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
305 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
306 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
307 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
308 extensions.wrapfunction(copies, '_computeforwardmissing',
309 computeforwardmissing)
310 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
311 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
312 extensions.wrapfunction(context.changectx, 'filectx', filectx)
313 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
314 extensions.wrapfunction(patch, 'trydiff', trydiff)
315 extensions.wrapfunction(hg, 'verify', _verify)
316 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
317
318 # disappointing hacks below
319 scmutil.getrenamedfn = getrenamedfn
320 extensions.wrapfunction(revset, 'filelog', filelogrevset)
321 revset.symbols['filelog'] = revset.filelog
322 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
323
324
296 325 def cloneshallow(orig, ui, repo, *args, **opts):
297 326 if opts.get(r'shallow'):
298 327 repos = []
299 328 def pull_shallow(orig, self, *args, **kwargs):
300 329 if not isenabled(self):
301 330 repos.append(self.unfiltered())
302 331 # set up the client hooks so the post-clone update works
303 332 setupclient(self.ui, self.unfiltered())
304 333
305 334 # setupclient fixed the class on the repo itself
306 335 # but we also need to fix it on the repoview
307 336 if isinstance(self, repoview.repoview):
308 337 self.__class__.__bases__ = (self.__class__.__bases__[0],
309 338 self.unfiltered().__class__)
310 339 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
311 340 self._writerequirements()
312 341
313 342 # Since setupclient hadn't been called, exchange.pull was not
314 343 # wrapped. So we need to manually invoke our version of it.
315 344 return exchangepull(orig, self, *args, **kwargs)
316 345 else:
317 346 return orig(self, *args, **kwargs)
318 347 extensions.wrapfunction(exchange, 'pull', pull_shallow)
319 348
320 349 # Wrap the stream logic to add requirements and to pass include/exclude
321 350 # patterns around.
322 351 def setup_streamout(repo, remote):
323 352 # Replace remote.stream_out with a version that sends file
324 353 # patterns.
325 354 def stream_out_shallow(orig):
326 355 caps = remote.capabilities()
327 356 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
328 357 opts = {}
329 358 if repo.includepattern:
330 359 opts[r'includepattern'] = '\0'.join(repo.includepattern)
331 360 if repo.excludepattern:
332 361 opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
333 362 return remote._callstream('stream_out_shallow', **opts)
334 363 else:
335 364 return orig()
336 365 extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
337 366 def stream_wrap(orig, op):
338 367 setup_streamout(op.repo, op.remote)
339 368 return orig(op)
340 369 extensions.wrapfunction(
341 370 streamclone, 'maybeperformlegacystreamclone', stream_wrap)
342 371
343 372 def canperformstreamclone(orig, pullop, bundle2=False):
344 373 # remotefilelog is currently incompatible with the
345 374 # bundle2 flavor of streamclones, so force us to use
346 375 # v1 instead.
347 376 if 'v2' in pullop.remotebundle2caps.get('stream', []):
348 377 pullop.remotebundle2caps['stream'] = [
349 378 c for c in pullop.remotebundle2caps['stream']
350 379 if c != 'v2']
351 380 if bundle2:
352 381 return False, None
353 382 supported, requirements = orig(pullop, bundle2=bundle2)
354 383 if requirements is not None:
355 384 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
356 385 return supported, requirements
357 386 extensions.wrapfunction(
358 387 streamclone, 'canperformstreamclone', canperformstreamclone)
359 388
360 389 try:
361 390 orig(ui, repo, *args, **opts)
362 391 finally:
363 392 if opts.get(r'shallow'):
364 393 for r in repos:
365 394 if util.safehasattr(r, 'fileservice'):
366 395 r.fileservice.close()
367 396
368 397 def debugdatashallow(orig, *args, **kwds):
369 398 oldlen = remotefilelog.remotefilelog.__len__
370 399 try:
371 400 remotefilelog.remotefilelog.__len__ = lambda x: 1
372 401 return orig(*args, **kwds)
373 402 finally:
374 403 remotefilelog.remotefilelog.__len__ = oldlen
375 404
376 405 def reposetup(ui, repo):
377 406 if not repo.local():
378 407 return
379 408
380 409 # put here intentionally because it doesn't work in uisetup
381 410 ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
382 411 ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
383 412
384 413 isserverenabled = ui.configbool('remotefilelog', 'server')
385 414 isshallowclient = isenabled(repo)
386 415
387 416 if isserverenabled and isshallowclient:
388 417 raise RuntimeError("Cannot be both a server and shallow client.")
389 418
390 419 if isshallowclient:
391 420 setupclient(ui, repo)
392 421
393 422 if isserverenabled:
394 423 remotefilelogserver.setupserver(ui, repo)
395 424
396 425 def setupclient(ui, repo):
397 426 if not isinstance(repo, localrepo.localrepository):
398 427 return
399 428
400 429 # Even clients get the server setup since they need to have the
401 430 # wireprotocol endpoints registered.
402 431 remotefilelogserver.onetimesetup(ui)
403 432 onetimeclientsetup(ui)
404 433
405 434 shallowrepo.wraprepo(repo)
406 435 repo.store = shallowstore.wrapstore(repo.store)
407 436
408 437 def storewrapper(orig, requirements, path, vfstype):
409 438 s = orig(requirements, path, vfstype)
410 439 if constants.SHALLOWREPO_REQUIREMENT in requirements:
411 440 s = shallowstore.wrapstore(s)
412 441
413 442 return s
414 443
415 444 # prefetch files before update
416 445 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
417 446 if isenabled(repo):
418 447 manifest = mctx.manifest()
419 448 files = []
420 449 for f, args, msg in actions['g']:
421 450 files.append((f, hex(manifest[f])))
422 451 # batch fetch the needed files from the server
423 452 repo.fileservice.prefetch(files)
424 453 return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
425 454
426 455 # Prefetch files before merge's checkunknownfiles check
427 456 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
428 457 *args, **kwargs):
429 458 if isenabled(repo):
430 459 files = []
431 460 sparsematch = repo.maybesparsematch(mctx.rev())
432 461 for f, (m, actionargs, msg) in actions.iteritems():
433 462 if sparsematch and not sparsematch(f):
434 463 continue
435 464 if m in ('c', 'dc', 'cm'):
436 465 files.append((f, hex(mctx.filenode(f))))
437 466 elif m == 'dg':
438 467 f2 = actionargs[0]
439 468 files.append((f2, hex(mctx.filenode(f2))))
440 469 # batch fetch the needed files from the server
441 470 repo.fileservice.prefetch(files)
442 471 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
443 472
444 473 # Prefetch files before status attempts to look at their size and contents
445 474 def checklookup(orig, self, files):
446 475 repo = self._repo
447 476 if isenabled(repo):
448 477 prefetchfiles = []
449 478 for parent in self._parents:
450 479 for f in files:
451 480 if f in parent:
452 481 prefetchfiles.append((f, hex(parent.filenode(f))))
453 482 # batch fetch the needed files from the server
454 483 repo.fileservice.prefetch(prefetchfiles)
455 484 return orig(self, files)
456 485
457 486 # Prefetch files for the logic that compares added and removed files for renames
458 487 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
459 488 if isenabled(repo):
460 489 files = []
461 490 pmf = repo['.'].manifest()
462 491 for f in removed:
463 492 if f in pmf:
464 493 files.append((f, hex(pmf[f])))
465 494 # batch fetch the needed files from the server
466 495 repo.fileservice.prefetch(files)
467 496 return orig(repo, matcher, added, removed, *args, **kwargs)
468 497
469 498 # prefetch files before pathcopies check
470 499 def computeforwardmissing(orig, a, b, match=None):
471 500 missing = orig(a, b, match=match)
472 501 repo = a._repo
473 502 if isenabled(repo):
474 503 mb = b.manifest()
475 504
476 505 files = []
477 506 sparsematch = repo.maybesparsematch(b.rev())
478 507 if sparsematch:
479 508 sparsemissing = set()
480 509 for f in missing:
481 510 if sparsematch(f):
482 511 files.append((f, hex(mb[f])))
483 512 sparsemissing.add(f)
484 513 missing = sparsemissing
485 514
486 515 # batch fetch the needed files from the server
487 516 repo.fileservice.prefetch(files)
488 517 return missing
489 518
490 519 # close cache miss server connection after the command has finished
491 520 def runcommand(orig, lui, repo, *args, **kwargs):
492 521 fileservice = None
493 522 # repo can be None when running in chg:
494 523 # - at startup, reposetup was called because serve is not norepo
495 524 # - a norepo command like "help" is called
496 525 if repo and isenabled(repo):
497 526 fileservice = repo.fileservice
498 527 try:
499 528 return orig(lui, repo, *args, **kwargs)
500 529 finally:
501 530 if fileservice:
502 531 fileservice.close()
503 532
504 533 # prevent strip from stripping remotefilelogs
505 534 def _collectbrokencsets(orig, repo, files, striprev):
506 535 if isenabled(repo):
507 536 files = list([f for f in files if not repo.shallowmatch(f)])
508 537 return orig(repo, files, striprev)
509 538
510 539 # changectx wrappers
511 540 def filectx(orig, self, path, fileid=None, filelog=None):
512 541 if fileid is None:
513 542 fileid = self.filenode(path)
514 543 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
515 544 return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
516 545 changectx=self, filelog=filelog)
517 546 return orig(self, path, fileid=fileid, filelog=filelog)
518 547
519 548 def workingfilectx(orig, self, path, filelog=None):
520 549 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
521 550 return remotefilectx.remoteworkingfilectx(self._repo, path,
522 551 workingctx=self,
523 552 filelog=filelog)
524 553 return orig(self, path, filelog=filelog)
525 554
526 555 # prefetch required revisions before a diff
527 556 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
528 557 copy, getfilectx, *args, **kwargs):
529 558 if isenabled(repo):
530 559 prefetch = []
531 560 mf1 = ctx1.manifest()
532 561 for fname in modified + added + removed:
533 562 if fname in mf1:
534 563 fnode = getfilectx(fname, ctx1).filenode()
535 564 # fnode can be None if it's an edited working ctx file
536 565 if fnode:
537 566 prefetch.append((fname, hex(fnode)))
538 567 if fname not in removed:
539 568 fnode = getfilectx(fname, ctx2).filenode()
540 569 if fnode:
541 570 prefetch.append((fname, hex(fnode)))
542 571
543 572 repo.fileservice.prefetch(prefetch)
544 573
545 574 return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
546 575 getfilectx, *args, **kwargs)
547 576
548 577 # Prevent verify from processing files
549 578 # a stub for mercurial.hg.verify()
550 579 def _verify(orig, repo, level=None):
551 580 lock = repo.lock()
552 581 try:
553 582 return shallowverifier.shallowverifier(repo).verify()
554 583 finally:
555 584 lock.release()
556 585
557 586
558 587 clientonetime = False
559 588 def onetimeclientsetup(ui):
560 589 global clientonetime
561 590 if clientonetime:
562 591 return
563 592 clientonetime = True
564 593
565 changegroup.cgpacker = shallowbundle.shallowcg1packer
566
567 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
568 shallowbundle.addchangegroupfiles)
569 extensions.wrapfunction(
570 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
571
572 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
573
574 extensions.wrapfunction(exchange, 'pull', exchangepull)
575
576 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
577
578 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
579
580 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
581
582 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
583
584 extensions.wrapfunction(copies, '_computeforwardmissing',
585 computeforwardmissing)
586
587 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
588
589 # disappointing hacks below
590 scmutil.getrenamedfn = getrenamedfn
591 extensions.wrapfunction(revset, 'filelog', filelogrevset)
592 revset.symbols['filelog'] = revset.filelog
593 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
594
595 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
596
597 594 # Don't commit filelogs until we know the commit hash, since the hash
598 595 # is present in the filelog blob.
599 596 # This violates Mercurial's filelog->manifest->changelog write order,
600 597 # but is generally fine for client repos.
601 598 pendingfilecommits = []
602 599 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
603 600 flags, cachedelta=None, _metatuple=None):
604 601 if isinstance(link, int):
605 602 pendingfilecommits.append(
606 603 (self, rawtext, transaction, link, p1, p2, node, flags,
607 604 cachedelta, _metatuple))
608 605 return node
609 606 else:
610 607 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
611 608 cachedelta, _metatuple=_metatuple)
612 609 extensions.wrapfunction(
613 610 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
614 611
615 612 def changelogadd(orig, self, *args):
616 613 oldlen = len(self)
617 614 node = orig(self, *args)
618 615 newlen = len(self)
619 616 if oldlen != newlen:
620 617 for oldargs in pendingfilecommits:
621 618 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
622 619 linknode = self.node(link)
623 620 if linknode == node:
624 621 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
625 622 else:
626 623 raise error.ProgrammingError(
627 624 'pending multiple integer revisions are not supported')
628 625 else:
629 626 # "link" is actually wrong here (it is set to len(changelog))
630 627 # if changelog remains unchanged, skip writing file revisions
631 628 # but still do a sanity check about pending multiple revisions
632 629 if len(set(x[3] for x in pendingfilecommits)) > 1:
633 630 raise error.ProgrammingError(
634 631 'pending multiple integer revisions are not supported')
635 632 del pendingfilecommits[:]
636 633 return node
637 634 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
638 635
639 extensions.wrapfunction(context.changectx, 'filectx', filectx)
640
641 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
642
643 extensions.wrapfunction(patch, 'trydiff', trydiff)
644
645 extensions.wrapfunction(hg, 'verify', _verify)
646
647 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
648
649 636 def getrenamedfn(repo, endrev=None):
650 637 rcache = {}
651 638
652 639 def getrenamed(fn, rev):
653 640 '''looks up all renames for a file (up to endrev) the first
654 641 time the file is given. It indexes on the changerev and only
655 642 parses the manifest if linkrev != changerev.
656 643 Returns rename info for fn at changerev rev.'''
657 644 if rev in rcache.setdefault(fn, {}):
658 645 return rcache[fn][rev]
659 646
660 647 try:
661 648 fctx = repo[rev].filectx(fn)
662 649 for ancestor in fctx.ancestors():
663 650 if ancestor.path() == fn:
664 651 renamed = ancestor.renamed()
665 652 rcache[fn][ancestor.rev()] = renamed and renamed[0]
666 653
667 654 renamed = fctx.renamed()
668 655 return renamed and renamed[0]
669 656 except error.LookupError:
670 657 return None
671 658
672 659 return getrenamed
673 660
674 661 def walkfilerevs(orig, repo, match, follow, revs, fncache):
675 662 if not isenabled(repo):
676 663 return orig(repo, match, follow, revs, fncache)
677 664
678 665 # remotefilelogs can't be walked in rev order, so throw.
679 666 # The caller will see the exception and walk the commit tree instead.
680 667 if not follow:
681 668 raise cmdutil.FileWalkError("Cannot walk via filelog")
682 669
683 670 wanted = set()
684 671 minrev, maxrev = min(revs), max(revs)
685 672
686 673 pctx = repo['.']
687 674 for filename in match.files():
688 675 if filename not in pctx:
689 676 raise error.Abort(_('cannot follow file not in parent '
690 677 'revision: "%s"') % filename)
691 678 fctx = pctx[filename]
692 679
693 680 linkrev = fctx.linkrev()
694 681 if linkrev >= minrev and linkrev <= maxrev:
695 682 fncache.setdefault(linkrev, []).append(filename)
696 683 wanted.add(linkrev)
697 684
698 685 for ancestor in fctx.ancestors():
699 686 linkrev = ancestor.linkrev()
700 687 if linkrev >= minrev and linkrev <= maxrev:
701 688 fncache.setdefault(linkrev, []).append(ancestor.path())
702 689 wanted.add(linkrev)
703 690
704 691 return wanted
705 692
706 693 def filelogrevset(orig, repo, subset, x):
707 694 """``filelog(pattern)``
708 695 Changesets connected to the specified filelog.
709 696
710 697 For performance reasons, ``filelog()`` does not show every changeset
711 698 that affects the requested file(s). See :hg:`help log` for details. For
712 699 a slower, more accurate result, use ``file()``.
713 700 """
714 701
715 702 if not isenabled(repo):
716 703 return orig(repo, subset, x)
717 704
718 705 # i18n: "filelog" is a keyword
719 706 pat = revset.getstring(x, _("filelog requires a pattern"))
720 707 m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
721 708 ctx=repo[None])
722 709 s = set()
723 710
724 711 if not match.patkind(pat):
725 712 # slow
726 713 for r in subset:
727 714 ctx = repo[r]
728 715 cfiles = ctx.files()
729 716 for f in m.files():
730 717 if f in cfiles:
731 718 s.add(ctx.rev())
732 719 break
733 720 else:
734 721 # partial
735 722 files = (f for f in repo[None] if m(f))
736 723 for f in files:
737 724 fctx = repo[None].filectx(f)
738 725 s.add(fctx.linkrev())
739 726 for actx in fctx.ancestors():
740 727 s.add(actx.linkrev())
741 728
742 729 return smartset.baseset([r for r in subset if r in s])
743 730
744 731 @command('gc', [], _('hg gc [REPO...]'), norepo=True)
745 732 def gc(ui, *args, **opts):
746 733 '''garbage collect the client and server filelog caches
747 734 '''
748 735 cachepaths = set()
749 736
750 737 # get the system client cache
751 738 systemcache = shallowutil.getcachepath(ui, allowempty=True)
752 739 if systemcache:
753 740 cachepaths.add(systemcache)
754 741
755 742 # get repo client and server cache
756 743 repopaths = []
757 744 pwd = ui.environ.get('PWD')
758 745 if pwd:
759 746 repopaths.append(pwd)
760 747
761 748 repopaths.extend(args)
762 749 repos = []
763 750 for repopath in repopaths:
764 751 try:
765 752 repo = hg.peer(ui, {}, repopath)
766 753 repos.append(repo)
767 754
768 755 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
769 756 if repocache:
770 757 cachepaths.add(repocache)
771 758 except error.RepoError:
772 759 pass
773 760
774 761 # gc client cache
775 762 for cachepath in cachepaths:
776 763 gcclient(ui, cachepath)
777 764
778 765 # gc server cache
779 766 for repo in repos:
780 767 remotefilelogserver.gcserver(ui, repo._repo)
781 768
782 769 def gcclient(ui, cachepath):
783 770 # get list of repos that use this cache
784 771 repospath = os.path.join(cachepath, 'repos')
785 772 if not os.path.exists(repospath):
786 773 ui.warn(_("no known cache at %s\n") % cachepath)
787 774 return
788 775
789 776 reposfile = open(repospath, 'rb')
790 777 repos = {r[:-1] for r in reposfile.readlines()}
791 778 reposfile.close()
792 779
793 780 # build list of useful files
794 781 validrepos = []
795 782 keepkeys = set()
796 783
797 784 sharedcache = None
798 785 filesrepacked = False
799 786
800 787 count = 0
801 788 progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
802 789 total=len(repos))
803 790 for path in repos:
804 791 progress.update(count)
805 792 count += 1
806 793 try:
807 794 path = ui.expandpath(os.path.normpath(path))
808 795 except TypeError as e:
809 796 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
810 797 traceback.print_exc()
811 798 continue
812 799 try:
813 800 peer = hg.peer(ui, {}, path)
814 801 repo = peer._repo
815 802 except error.RepoError:
816 803 continue
817 804
818 805 validrepos.append(path)
819 806
820 807 # Protect against any repo or config changes that have happened since
821 808 # this repo was added to the repos file. We'd rather this loop succeed
822 809 # and too much be deleted, than the loop fail and nothing gets deleted.
823 810 if not isenabled(repo):
824 811 continue
825 812
826 813 if not util.safehasattr(repo, 'name'):
827 814 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
828 815 continue
829 816
830 817 # If garbage collection on repack and repack on hg gc are enabled
831 818 # then loose files are repacked and garbage collected.
832 819 # Otherwise regular garbage collection is performed.
833 820 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
834 821 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
835 822 if repackonhggc and gcrepack:
836 823 try:
837 824 repackmod.incrementalrepack(repo)
838 825 filesrepacked = True
839 826 continue
840 827 except (IOError, repackmod.RepackAlreadyRunning):
841 828 # If repack cannot be performed due to insufficient disk space,
842 829 # continue doing garbage collection of loose files w/o repack
843 830 pass
844 831
845 832 reponame = repo.name
846 833 if not sharedcache:
847 834 sharedcache = repo.sharedstore
848 835
849 836 # Compute a keepset which is not garbage collected
850 837 def keyfn(fname, fnode):
851 838 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
852 839 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
853 840
854 841 progress.complete()
855 842
856 843 # write list of valid repos back
857 844 oldumask = os.umask(0o002)
858 845 try:
859 846 reposfile = open(repospath, 'wb')
860 847 reposfile.writelines([("%s\n" % r) for r in validrepos])
861 848 reposfile.close()
862 849 finally:
863 850 os.umask(oldumask)
864 851
865 852 # prune cache
866 853 if sharedcache is not None:
867 854 sharedcache.gc(keepkeys)
868 855 elif not filesrepacked:
869 856 ui.warn(_("warning: no valid repos in repofile\n"))
870 857
871 858 def log(orig, ui, repo, *pats, **opts):
872 859 if not isenabled(repo):
873 860 return orig(ui, repo, *pats, **opts)
874 861
875 862 follow = opts.get(r'follow')
876 863 revs = opts.get(r'rev')
877 864 if pats:
878 865 # Force slowpath for non-follow patterns and follows that start from
879 866 # non-working-copy-parent revs.
880 867 if not follow or revs:
881 868 # This forces the slowpath
882 869 opts[r'removed'] = True
883 870
884 871 # If this is a non-follow log without any revs specified, recommend that
885 872 # the user add -f to speed it up.
886 873 if not follow and not revs:
887 874 match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
888 875 isfile = not match.anypats()
889 876 if isfile:
890 877 for file in match.files():
891 878 if not os.path.isfile(repo.wjoin(file)):
892 879 isfile = False
893 880 break
894 881
895 882 if isfile:
896 883 ui.warn(_("warning: file log can be slow on large repos - " +
897 884 "use -f to speed it up\n"))
898 885
899 886 return orig(ui, repo, *pats, **opts)
900 887
901 888 def revdatelimit(ui, revset):
902 889 """Update revset so that only changesets no older than 'prefetchdays' days
903 890 are included. The default value is set to 14 days. If 'prefetchdays' is set
904 891 to zero or a negative value, the date restriction is not applied.
905 892 """
906 893 days = ui.configint('remotefilelog', 'prefetchdays')
907 894 if days > 0:
908 895 revset = '(%s) & date(-%s)' % (revset, days)
909 896 return revset
910 897
911 898 def readytofetch(repo):
912 899 """Check that enough time has passed since the last background prefetch.
913 900 This only relates to prefetches after operations that change the working
914 901 copy parent. Default delay between background prefetches is 2 minutes.
915 902 """
916 903 timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
917 904 fname = repo.vfs.join('lastprefetch')
918 905
919 906 ready = False
920 907 with open(fname, 'a'):
921 908 # the with construct above is used to avoid race conditions
922 909 modtime = os.path.getmtime(fname)
923 910 if (time.time() - modtime) > timeout:
924 911 os.utime(fname, None)
925 912 ready = True
926 913
927 914 return ready
928 915
929 916 def wcpprefetch(ui, repo, **kwargs):
930 917 """Prefetches in background revisions specified by bgprefetchrevs revset.
931 918 Does background repack if backgroundrepack flag is set in config.
932 919 """
933 920 shallow = isenabled(repo)
934 921 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
935 922 isready = readytofetch(repo)
936 923
937 924 if not (shallow and bgprefetchrevs and isready):
938 925 return
939 926
940 927 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
941 928 # update a revset with a date limit
942 929 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
943 930
944 931 def anon():
945 932 if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
946 933 return
947 934 repo.ranprefetch = True
948 935 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
949 936
950 937 repo._afterlock(anon)
951 938
952 939 def pull(orig, ui, repo, *pats, **opts):
953 940 result = orig(ui, repo, *pats, **opts)
954 941
955 942 if isenabled(repo):
956 943 # prefetch if it's configured
957 944 prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
958 945 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
959 946 bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
960 947
961 948 if prefetchrevset:
962 949 ui.status(_("prefetching file contents\n"))
963 950 revs = scmutil.revrange(repo, [prefetchrevset])
964 951 base = repo['.'].rev()
965 952 if bgprefetch:
966 953 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
967 954 else:
968 955 repo.prefetch(revs, base=base)
969 956 if bgrepack:
970 957 repackmod.backgroundrepack(repo, incremental=True)
971 958 elif bgrepack:
972 959 repackmod.backgroundrepack(repo, incremental=True)
973 960
974 961 return result
975 962
976 963 def exchangepull(orig, repo, remote, *args, **kwargs):
977 964 # Hook into the callstream/getbundle to insert bundle capabilities
978 965 # during a pull.
979 966 def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
980 967 **kwargs):
981 968 if not bundlecaps:
982 969 bundlecaps = set()
983 970 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
984 971 return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
985 972 **kwargs)
986 973
987 974 if util.safehasattr(remote, '_callstream'):
988 975 remote._localrepo = repo
989 976 elif util.safehasattr(remote, 'getbundle'):
990 977 extensions.wrapfunction(remote, 'getbundle', localgetbundle)
991 978
992 979 return orig(repo, remote, *args, **kwargs)
993 980
994 981 def _fileprefetchhook(repo, revs, match):
995 982 if isenabled(repo):
996 983 allfiles = []
997 984 for rev in revs:
998 985 if rev == nodemod.wdirrev or rev is None:
999 986 continue
1000 987 ctx = repo[rev]
1001 988 mf = ctx.manifest()
1002 989 sparsematch = repo.maybesparsematch(ctx.rev())
1003 990 for path in ctx.walk(match):
1004 991 if path.endswith('/'):
1005 992 # Tree manifest that's being excluded as part of narrow
1006 993 continue
1007 994 if (not sparsematch or sparsematch(path)) and path in mf:
1008 995 allfiles.append((path, hex(mf[path])))
1009 996 repo.fileservice.prefetch(allfiles)
1010 997
1011 998 @command('debugremotefilelog', [
1012 999 ('d', 'decompress', None, _('decompress the filelog first')),
1013 1000 ], _('hg debugremotefilelog <path>'), norepo=True)
1014 1001 def debugremotefilelog(ui, path, **opts):
1015 1002 return debugcommands.debugremotefilelog(ui, path, **opts)
1016 1003
1017 1004 @command('verifyremotefilelog', [
1018 1005 ('d', 'decompress', None, _('decompress the filelogs first')),
1019 1006 ], _('hg verifyremotefilelogs <directory>'), norepo=True)
1020 1007 def verifyremotefilelog(ui, path, **opts):
1021 1008 return debugcommands.verifyremotefilelog(ui, path, **opts)
1022 1009
1023 1010 @command('debugdatapack', [
1024 1011 ('', 'long', None, _('print the long hashes')),
1025 1012 ('', 'node', '', _('dump the contents of node'), 'NODE'),
1026 1013 ], _('hg debugdatapack <paths>'), norepo=True)
1027 1014 def debugdatapack(ui, *paths, **opts):
1028 1015 return debugcommands.debugdatapack(ui, *paths, **opts)
1029 1016
1030 1017 @command('debughistorypack', [
1031 1018 ], _('hg debughistorypack <path>'), norepo=True)
1032 1019 def debughistorypack(ui, path, **opts):
1033 1020 return debugcommands.debughistorypack(ui, path)
1034 1021
1035 1022 @command('debugkeepset', [
1036 1023 ], _('hg debugkeepset'))
1037 1024 def debugkeepset(ui, repo, **opts):
1038 1025 # The command is used to measure keepset computation time
1039 1026 def keyfn(fname, fnode):
1040 1027 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1041 1028 repackmod.keepset(repo, keyfn)
1042 1029 return
1043 1030
1044 1031 @command('debugwaitonrepack', [
1045 1032 ], _('hg debugwaitonrepack'))
1046 1033 def debugwaitonrepack(ui, repo, **opts):
1047 1034 return debugcommands.debugwaitonrepack(repo)
1048 1035
1049 1036 @command('debugwaitonprefetch', [
1050 1037 ], _('hg debugwaitonprefetch'))
1051 1038 def debugwaitonprefetch(ui, repo, **opts):
1052 1039 return debugcommands.debugwaitonprefetch(repo)
1053 1040
1054 1041 def resolveprefetchopts(ui, opts):
1055 1042 if not opts.get('rev'):
1056 1043 revset = ['.', 'draft()']
1057 1044
1058 1045 prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
1059 1046 if prefetchrevset:
1060 1047 revset.append('(%s)' % prefetchrevset)
1061 1048 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
1062 1049 if bgprefetchrevs:
1063 1050 revset.append('(%s)' % bgprefetchrevs)
1064 1051 revset = '+'.join(revset)
1065 1052
1066 1053 # update a revset with a date limit
1067 1054 revset = revdatelimit(ui, revset)
1068 1055
1069 1056 opts['rev'] = [revset]
1070 1057
1071 1058 if not opts.get('base'):
1072 1059 opts['base'] = None
1073 1060
1074 1061 return opts
1075 1062
1076 1063 @command('prefetch', [
1077 1064 ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
1078 1065 ('', 'repack', False, _('run repack after prefetch')),
1079 1066 ('b', 'base', '', _("rev that is assumed to already be local")),
1080 1067 ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
1081 1068 def prefetch(ui, repo, *pats, **opts):
1082 1069 """prefetch file revisions from the server
1083 1070
1084 1071 Prefetches file revisions for the specified revs and stores them in the
1085 1072 local remotefilelog cache. If no rev is specified, the default rev is
1086 1073 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1087 1074 File names or patterns can be used to limit which files are downloaded.
1088 1075
1089 1076 Return 0 on success.
1090 1077 """
1091 1078 opts = pycompat.byteskwargs(opts)
1092 1079 if not isenabled(repo):
1093 1080 raise error.Abort(_("repo is not shallow"))
1094 1081
1095 1082 opts = resolveprefetchopts(ui, opts)
1096 1083 revs = scmutil.revrange(repo, opts.get('rev'))
1097 1084 repo.prefetch(revs, opts.get('base'), pats, opts)
1098 1085
1099 1086 # Run repack in background
1100 1087 if opts.get('repack'):
1101 1088 repackmod.backgroundrepack(repo, incremental=True)
1102 1089
1103 1090 @command('repack', [
1104 1091 ('', 'background', None, _('run in a background process'), None),
1105 1092 ('', 'incremental', None, _('do an incremental repack'), None),
1106 1093 ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
1107 1094 ], _('hg repack [OPTIONS]'))
1108 1095 def repack_(ui, repo, *pats, **opts):
1109 1096 if opts.get(r'background'):
1110 1097 repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
1111 1098 packsonly=opts.get(r'packsonly', False))
1112 1099 return
1113 1100
1114 1101 options = {'packsonly': opts.get(r'packsonly')}
1115 1102
1116 1103 try:
1117 1104 if opts.get(r'incremental'):
1118 1105 repackmod.incrementalrepack(repo, options=options)
1119 1106 else:
1120 1107 repackmod.fullrepack(repo, options=options)
1121 1108 except repackmod.RepackAlreadyRunning as ex:
1122 1109 # Don't propagate the exception if the repack is already in
1123 1110 # progress, since we want the command to exit 0.
1124 1111 repo.ui.warn('%s\n' % ex)
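For reference, the options documented in the module docstring combine into an ``hgrc`` along the following lines. This is an illustrative sketch, not a recommended setup; the cache path, repo name, and revset are placeholders:

```ini
[extensions]
remotefilelog =

[remotefilelog]
# client side: where lazily fetched file contents are cached
cachepath = /var/cache/hg-remotefilelog
reponame = example-repo
# eagerly fetch files for these revisions on pull (illustrative revset)
pullprefetch = draft()
# consolidate pack files in a background process
backgroundrepack = True
```

On the server side the extension is enabled with ``remotefilelog.server = True`` instead. A client created with ``hg clone --shallow`` (the flag this commit's ``cloneshallow`` wrapper adds to clone) then fetches file contents on demand, and the ``hg prefetch``, ``hg repack``, and ``hg gc`` commands registered above manage the local cache.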