remotefilelog: do file IO in terms of bytes...
Augie Fackler
r41284:312afd16 default
@@ -1,1143 +1,1143 @@
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 minimum number of pack files > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list specifying the lower bound of
52 52 each generation of the data pack files. For example, ['100MB', '1MB']
53 53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
54 54 [1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, minimum number of pack files > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list specifying the lower bound of
70 70 each generation of the history pack files. For example, ['100MB', '1MB']
71 71 or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
72 72 [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning`` message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
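A minimal client configuration might look like this (the path, repo name
and revset values are illustrative)::

  [remotefilelog]
  cachepath = /path/to/shared/cache
  reponame = myrepo
  pullprefetch = bookmark() + draft()
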
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial import (
136 136 changegroup,
137 137 changelog,
138 138 cmdutil,
139 139 commands,
140 140 configitems,
141 141 context,
142 142 copies,
143 143 debugcommands as hgdebugcommands,
144 144 dispatch,
145 145 error,
146 146 exchange,
147 147 extensions,
148 148 hg,
149 149 localrepo,
150 150 match,
151 151 merge,
152 152 node as nodemod,
153 153 patch,
154 154 pycompat,
155 155 registrar,
156 156 repair,
157 157 repoview,
158 158 revset,
159 159 scmutil,
160 160 smartset,
161 161 streamclone,
162 162 templatekw,
163 163 util,
164 164 )
165 165 from . import (
166 166 constants,
167 167 debugcommands,
168 168 fileserverclient,
169 169 remotefilectx,
170 170 remotefilelog,
171 171 remotefilelogserver,
172 172 repack as repackmod,
173 173 shallowbundle,
174 174 shallowrepo,
175 175 shallowstore,
176 176 shallowutil,
177 177 shallowverifier,
178 178 )
179 179
180 180 # ensures debug commands are registered
181 181 hgdebugcommands.command
182 182
183 183 cmdtable = {}
184 184 command = registrar.command(cmdtable)
185 185
186 186 configtable = {}
187 187 configitem = registrar.configitem(configtable)
188 188
189 189 configitem('remotefilelog', 'debug', default=False)
190 190
191 191 configitem('remotefilelog', 'reponame', default='')
192 192 configitem('remotefilelog', 'cachepath', default=None)
193 193 configitem('remotefilelog', 'cachegroup', default=None)
194 194 configitem('remotefilelog', 'cacheprocess', default=None)
195 195 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
196 196 configitem("remotefilelog", "cachelimit", default="1000 GB")
197 197
198 198 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
199 199 alias=[('remotefilelog', 'fallbackrepo')])
200 200
201 201 configitem('remotefilelog', 'validatecachelog', default=None)
202 202 configitem('remotefilelog', 'validatecache', default='on')
203 203 configitem('remotefilelog', 'server', default=None)
204 204 configitem('remotefilelog', 'servercachepath', default=None)
205 205 configitem("remotefilelog", "serverexpiration", default=30)
206 206 configitem('remotefilelog', 'backgroundrepack', default=False)
207 207 configitem('remotefilelog', 'bgprefetchrevs', default=None)
208 208 configitem('remotefilelog', 'pullprefetch', default=None)
209 209 configitem('remotefilelog', 'backgroundprefetch', default=False)
210 210 configitem('remotefilelog', 'prefetchdelay', default=120)
211 211 configitem('remotefilelog', 'prefetchdays', default=14)
212 212
213 213 configitem('remotefilelog', 'getfilesstep', default=10000)
214 214 configitem('remotefilelog', 'getfilestype', default='optimistic')
215 215 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
216 216 configitem('remotefilelog', 'fetchwarning', default='')
217 217
218 218 configitem('remotefilelog', 'includepattern', default=None)
219 219 configitem('remotefilelog', 'excludepattern', default=None)
220 220
221 221 configitem('remotefilelog', 'gcrepack', default=False)
222 222 configitem('remotefilelog', 'repackonhggc', default=False)
223 223 configitem('repack', 'chainorphansbysize', default=True)
224 224
225 225 configitem('packs', 'maxpacksize', default=0)
226 226 configitem('packs', 'maxchainlen', default=1000)
227 227
228 228 # default TTL limit is 30 days
229 229 _defaultlimit = 60 * 60 * 24 * 30
230 230 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
231 231
232 232 configitem('remotefilelog', 'data.gencountlimit', default=2)
233 233 configitem('remotefilelog', 'data.generations',
234 234 default=['1GB', '100MB', '1MB'])
235 235 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
236 236 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
237 237 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
238 238
239 239 configitem('remotefilelog', 'history.gencountlimit', default=2)
240 240 configitem('remotefilelog', 'history.generations', default=['100MB'])
241 241 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
242 242 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
243 243 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
244 244
245 245 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
246 246 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
247 247 # be specifying the version(s) of Mercurial they are tested with, or
248 248 # leave the attribute unspecified.
249 249 testedwith = 'ships-with-hg-core'
250 250
251 251 repoclass = localrepo.localrepository
252 252 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
253 253
254 254 isenabled = shallowutil.isenabled
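# isenabled(repo) guards every shallow-only code path below; it reports
# whether the repo carries the shallow-repo requirement registered above.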
255 255
256 256 def uisetup(ui):
257 257 """Wraps user facing Mercurial commands to swap them out with shallow
258 258 versions.
259 259 """
260 260 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
261 261
262 262 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
263 263 entry[1].append(('', 'shallow', None,
264 264 _("create a shallow clone which uses remote file "
265 265 "history")))
266 266
267 267 extensions.wrapcommand(commands.table, 'debugindex',
268 268 debugcommands.debugindex)
269 269 extensions.wrapcommand(commands.table, 'debugindexdot',
270 270 debugcommands.debugindexdot)
271 271 extensions.wrapcommand(commands.table, 'log', log)
272 272 extensions.wrapcommand(commands.table, 'pull', pull)
273 273
274 274 # Prevent 'hg manifest --all'
275 275 def _manifest(orig, ui, repo, *args, **opts):
276 276 if (isenabled(repo) and opts.get(r'all')):
277 277 raise error.Abort(_("--all is not supported in a shallow repo"))
278 278
279 279 return orig(ui, repo, *args, **opts)
280 280 extensions.wrapcommand(commands.table, "manifest", _manifest)
281 281
282 282 # Wrap remotefilelog with lfs code
283 283 def _lfsloaded(loaded=False):
284 284 lfsmod = None
285 285 try:
286 286 lfsmod = extensions.find('lfs')
287 287 except KeyError:
288 288 pass
289 289 if lfsmod:
290 290 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
291 291 fileserverclient._lfsmod = lfsmod
292 292 extensions.afterloaded('lfs', _lfsloaded)
293 293
294 294 # debugdata needs remotefilelog.len to work
295 295 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
296 296
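# A minimal sketch (illustrative, never registered) of the wrapper
# convention used throughout uisetup above: extensions.wrapcommand prepends
# the original function as the first argument, and the wrapper normally
# delegates to it.
def _examplecommandwrapper(orig, ui, repo, *args, **opts):
    ui.note("about to run the wrapped command\n")
    return orig(ui, repo, *args, **opts)
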
297 297 def cloneshallow(orig, ui, repo, *args, **opts):
298 298 if opts.get(r'shallow'):
299 299 repos = []
300 300 def pull_shallow(orig, self, *args, **kwargs):
301 301 if not isenabled(self):
302 302 repos.append(self.unfiltered())
303 303 # set up the client hooks so the post-clone update works
304 304 setupclient(self.ui, self.unfiltered())
305 305
306 306 # setupclient fixed the class on the repo itself
307 307 # but we also need to fix it on the repoview
308 308 if isinstance(self, repoview.repoview):
309 309 self.__class__.__bases__ = (self.__class__.__bases__[0],
310 310 self.unfiltered().__class__)
311 311 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
312 312 self._writerequirements()
313 313
314 314 # Since setupclient hadn't been called, exchange.pull was not
315 315 # wrapped. So we need to manually invoke our version of it.
316 316 return exchangepull(orig, self, *args, **kwargs)
317 317 else:
318 318 return orig(self, *args, **kwargs)
319 319 extensions.wrapfunction(exchange, 'pull', pull_shallow)
320 320
321 321 # Wrap the stream logic to add requirements and to pass include/exclude
322 322 # patterns around.
323 323 def setup_streamout(repo, remote):
324 324 # Replace remote.stream_out with a version that sends file
325 325 # patterns.
326 326 def stream_out_shallow(orig):
327 327 caps = remote.capabilities()
328 328 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
329 329 opts = {}
330 330 if repo.includepattern:
331 331 opts[r'includepattern'] = '\0'.join(repo.includepattern)
332 332 if repo.excludepattern:
333 333 opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
334 334 return remote._callstream('stream_out_shallow', **opts)
335 335 else:
336 336 return orig()
337 337 extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
338 338 def stream_wrap(orig, op):
339 339 setup_streamout(op.repo, op.remote)
340 340 return orig(op)
341 341 extensions.wrapfunction(
342 342 streamclone, 'maybeperformlegacystreamclone', stream_wrap)
343 343
344 344 def canperformstreamclone(orig, pullop, bundle2=False):
345 345 # remotefilelog is currently incompatible with the
346 346 # bundle2 flavor of streamclones, so force us to use
347 347 # v1 instead.
348 348 if 'v2' in pullop.remotebundle2caps.get('stream', []):
349 349 pullop.remotebundle2caps['stream'] = [
350 350 c for c in pullop.remotebundle2caps['stream']
351 351 if c != 'v2']
352 352 if bundle2:
353 353 return False, None
354 354 supported, requirements = orig(pullop, bundle2=bundle2)
355 355 if requirements is not None:
356 356 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
357 357 return supported, requirements
358 358 extensions.wrapfunction(
359 359 streamclone, 'canperformstreamclone', canperformstreamclone)
360 360
361 361 try:
362 362 orig(ui, repo, *args, **opts)
363 363 finally:
364 364 if opts.get(r'shallow'):
365 365 for r in repos:
366 366 if util.safehasattr(r, 'fileservice'):
367 367 r.fileservice.close()
368 368
369 369 def debugdatashallow(orig, *args, **kwds):
370 370 oldlen = remotefilelog.remotefilelog.__len__
371 371 try:
372 372 remotefilelog.remotefilelog.__len__ = lambda x: 1
373 373 return orig(*args, **kwds)
374 374 finally:
375 375 remotefilelog.remotefilelog.__len__ = oldlen
376 376
377 377 def reposetup(ui, repo):
378 378 if not repo.local():
379 379 return
380 380
381 381 # put here intentionally because it doesn't work in uisetup
382 382 ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
383 383 ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
384 384
385 385 isserverenabled = ui.configbool('remotefilelog', 'server')
386 386 isshallowclient = isenabled(repo)
387 387
388 388 if isserverenabled and isshallowclient:
389 389 raise RuntimeError("Cannot be both a server and shallow client.")
390 390
391 391 if isshallowclient:
392 392 setupclient(ui, repo)
393 393
394 394 if isserverenabled:
395 395 remotefilelogserver.setupserver(ui, repo)
396 396
397 397 def setupclient(ui, repo):
398 398 if not isinstance(repo, localrepo.localrepository):
399 399 return
400 400
401 401 # Even clients get the server setup since they need to have the
402 402 # wireprotocol endpoints registered.
403 403 remotefilelogserver.onetimesetup(ui)
404 404 onetimeclientsetup(ui)
405 405
406 406 shallowrepo.wraprepo(repo)
407 407 repo.store = shallowstore.wrapstore(repo.store)
408 408
409 409 clientonetime = False
410 410 def onetimeclientsetup(ui):
411 411 global clientonetime
412 412 if clientonetime:
413 413 return
414 414 clientonetime = True
415 415
416 416 changegroup.cgpacker = shallowbundle.shallowcg1packer
417 417
418 418 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
419 419 shallowbundle.addchangegroupfiles)
420 420 extensions.wrapfunction(
421 421 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
422 422
423 423 def storewrapper(orig, requirements, path, vfstype):
424 424 s = orig(requirements, path, vfstype)
425 425 if constants.SHALLOWREPO_REQUIREMENT in requirements:
426 426 s = shallowstore.wrapstore(s)
427 427
428 428 return s
429 429 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
430 430
431 431 extensions.wrapfunction(exchange, 'pull', exchangepull)
432 432
433 433 # prefetch files before update
434 434 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
435 435 if isenabled(repo):
436 436 manifest = mctx.manifest()
437 437 files = []
438 438 for f, args, msg in actions['g']:
439 439 files.append((f, hex(manifest[f])))
440 440 # batch fetch the needed files from the server
441 441 repo.fileservice.prefetch(files)
442 442 return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
443 443 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
444 444
445 445 # Prefetch merge checkunknownfiles
446 446 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
447 447 *args, **kwargs):
448 448 if isenabled(repo):
449 449 files = []
450 450 sparsematch = repo.maybesparsematch(mctx.rev())
451 451 for f, (m, actionargs, msg) in actions.iteritems():
452 452 if sparsematch and not sparsematch(f):
453 453 continue
454 454 if m in ('c', 'dc', 'cm'):
455 455 files.append((f, hex(mctx.filenode(f))))
456 456 elif m == 'dg':
457 457 f2 = actionargs[0]
458 458 files.append((f2, hex(mctx.filenode(f2))))
459 459 # batch fetch the needed files from the server
460 460 repo.fileservice.prefetch(files)
461 461 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
462 462 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
463 463
464 464 # Prefetch files before status attempts to look at their size and contents
465 465 def checklookup(orig, self, files):
466 466 repo = self._repo
467 467 if isenabled(repo):
468 468 prefetchfiles = []
469 469 for parent in self._parents:
470 470 for f in files:
471 471 if f in parent:
472 472 prefetchfiles.append((f, hex(parent.filenode(f))))
473 473 # batch fetch the needed files from the server
474 474 repo.fileservice.prefetch(prefetchfiles)
475 475 return orig(self, files)
476 476 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
477 477
478 478 # Prefetch the logic that compares added and removed files for renames
479 479 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
480 480 if isenabled(repo):
481 481 files = []
482 482 parentctx = repo['.']
483 483 for f in removed:
484 484 files.append((f, hex(parentctx.filenode(f))))
485 485 # batch fetch the needed files from the server
486 486 repo.fileservice.prefetch(files)
487 487 return orig(repo, matcher, added, removed, *args, **kwargs)
488 488 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
489 489
490 490 # prefetch files before mergecopies check
491 491 def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
492 492 u1, u2 = orig(repo, c1, c2, *args, **kwargs)
493 493 if isenabled(repo):
494 494 m1 = c1.manifest()
495 495 m2 = c2.manifest()
496 496 files = []
497 497
498 498 sparsematch1 = repo.maybesparsematch(c1.rev())
499 499 if sparsematch1:
500 500 sparseu1 = []
501 501 for f in u1:
502 502 if sparsematch1(f):
503 503 files.append((f, hex(m1[f])))
504 504 sparseu1.append(f)
505 505 u1 = sparseu1
506 506
507 507 sparsematch2 = repo.maybesparsematch(c2.rev())
508 508 if sparsematch2:
509 509 sparseu2 = []
510 510 for f in u2:
511 511 if sparsematch2(f):
512 512 files.append((f, hex(m2[f])))
513 513 sparseu2.append(f)
514 514 u2 = sparseu2
515 515
516 516 # batch fetch the needed files from the server
517 517 repo.fileservice.prefetch(files)
518 518 return u1, u2
519 519 extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap)
520 520
521 521 # prefetch files before pathcopies check
522 522 def computeforwardmissing(orig, a, b, match=None):
523 523 missing = list(orig(a, b, match=match))
524 524 repo = a._repo
525 525 if isenabled(repo):
526 526 mb = b.manifest()
527 527
528 528 files = []
529 529 sparsematch = repo.maybesparsematch(b.rev())
530 530 if sparsematch:
531 531 sparsemissing = []
532 532 for f in missing:
533 533 if sparsematch(f):
534 534 files.append((f, hex(mb[f])))
535 535 sparsemissing.append(f)
536 536 missing = sparsemissing
537 537
538 538 # batch fetch the needed files from the server
539 539 repo.fileservice.prefetch(files)
540 540 return missing
541 541 extensions.wrapfunction(copies, '_computeforwardmissing',
542 542 computeforwardmissing)
543 543
544 544 # close cache miss server connection after the command has finished
545 545 def runcommand(orig, lui, repo, *args, **kwargs):
546 546 fileservice = None
547 547 # repo can be None when running in chg:
548 548 # - at startup, reposetup was called because serve is not norepo
549 549 # - a norepo command like "help" is called
550 550 if repo and isenabled(repo):
551 551 fileservice = repo.fileservice
552 552 try:
553 553 return orig(lui, repo, *args, **kwargs)
554 554 finally:
555 555 if fileservice:
556 556 fileservice.close()
557 557 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
558 558
559 559 # disappointing hacks below
560 560 templatekw.getrenamedfn = getrenamedfn
561 561 extensions.wrapfunction(revset, 'filelog', filelogrevset)
562 562 revset.symbols['filelog'] = revset.filelog
563 563 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
564 564
565 565 # prevent strip from stripping remotefilelogs
566 566 def _collectbrokencsets(orig, repo, files, striprev):
567 567 if isenabled(repo):
568 568 files = [f for f in files if not repo.shallowmatch(f)]
569 569 return orig(repo, files, striprev)
570 570 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
571 571
572 572 # Don't commit filelogs until we know the commit hash, since the hash
573 573 # is present in the filelog blob.
574 574 # This violates Mercurial's filelog->manifest->changelog write order,
575 575 # but is generally fine for client repos.
576 576 pendingfilecommits = []
577 577 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
578 578 flags, cachedelta=None, _metatuple=None):
579 579 if isinstance(link, int):
580 580 pendingfilecommits.append(
581 581 (self, rawtext, transaction, link, p1, p2, node, flags,
582 582 cachedelta, _metatuple))
583 583 return node
584 584 else:
585 585 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
586 586 cachedelta, _metatuple=_metatuple)
587 587 extensions.wrapfunction(
588 588 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
589 589
590 590 def changelogadd(orig, self, *args):
591 591 oldlen = len(self)
592 592 node = orig(self, *args)
593 593 newlen = len(self)
594 594 if oldlen != newlen:
595 595 for oldargs in pendingfilecommits:
596 596 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
597 597 linknode = self.node(link)
598 598 if linknode == node:
599 599 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
600 600 else:
601 601 raise error.ProgrammingError(
602 602 'pending multiple integer revisions are not supported')
603 603 else:
604 604 # "link" is actually wrong here (it is set to len(changelog))
605 605 # if changelog remains unchanged, skip writing file revisions
606 606 # but still do a sanity check about pending multiple revisions
607 607 if len(set(x[3] for x in pendingfilecommits)) > 1:
608 608 raise error.ProgrammingError(
609 609 'pending multiple integer revisions are not supported')
610 610 del pendingfilecommits[:]
611 611 return node
612 612 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
613 613
614 614 # changectx wrappers
615 615 def filectx(orig, self, path, fileid=None, filelog=None):
616 616 if fileid is None:
617 617 fileid = self.filenode(path)
618 618 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
619 619 return remotefilectx.remotefilectx(self._repo, path,
620 620 fileid=fileid, changectx=self, filelog=filelog)
621 621 return orig(self, path, fileid=fileid, filelog=filelog)
622 622 extensions.wrapfunction(context.changectx, 'filectx', filectx)
623 623
624 624 def workingfilectx(orig, self, path, filelog=None):
625 625 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
626 626 return remotefilectx.remoteworkingfilectx(self._repo,
627 627 path, workingctx=self, filelog=filelog)
628 628 return orig(self, path, filelog=filelog)
629 629 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
630 630
631 631 # prefetch required revisions before a diff
632 632 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
633 633 copy, getfilectx, *args, **kwargs):
634 634 if isenabled(repo):
635 635 prefetch = []
636 636 mf1 = ctx1.manifest()
637 637 for fname in modified + added + removed:
638 638 if fname in mf1:
639 639 fnode = getfilectx(fname, ctx1).filenode()
640 640 # fnode can be None if it's an edited working ctx file
641 641 if fnode:
642 642 prefetch.append((fname, hex(fnode)))
643 643 if fname not in removed:
644 644 fnode = getfilectx(fname, ctx2).filenode()
645 645 if fnode:
646 646 prefetch.append((fname, hex(fnode)))
647 647
648 648 repo.fileservice.prefetch(prefetch)
649 649
650 650 return orig(repo, revs, ctx1, ctx2, modified, added, removed,
651 651 copy, getfilectx, *args, **kwargs)
652 652 extensions.wrapfunction(patch, 'trydiff', trydiff)
653 653
654 654 # Prevent verify from processing files
655 655 # a stub for mercurial.hg.verify()
656 656 def _verify(orig, repo):
657 657 lock = repo.lock()
658 658 try:
659 659 return shallowverifier.shallowverifier(repo).verify()
660 660 finally:
661 661 lock.release()
662 662
663 663 extensions.wrapfunction(hg, 'verify', _verify)
664 664
665 665 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
666 666
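# Illustrative sketch (hypothetical names, not used by the extension) of the
# deferred-write idea behind pendingfilecommits in onetimeclientsetup above:
# writes that arrive with a pending integer linkrev are queued until the
# changelog add assigns the real linknode, then flushed.
class _exampledeferredwrites(object):
    def __init__(self, writefn):
        self._writefn = writefn  # performs the real storage write
        self._pending = []       # queued (linkrev, payload) pairs

    def add(self, link, payload):
        if isinstance(link, int):
            # changelog entry not committed yet; defer the write
            self._pending.append((link, payload))
        else:
            self._writefn(link, payload)

    def flush(self, resolvenode):
        # resolvenode maps a pending integer linkrev to the real node
        for link, payload in self._pending:
            self._writefn(resolvenode(link), payload)
        del self._pending[:]
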
667 667 def getrenamedfn(repo, endrev=None):
668 668 rcache = {}
669 669
670 670 def getrenamed(fn, rev):
671 671 '''looks up all renames for a file (up to endrev) the first
672 672 time the file is given. It indexes on the changerev and only
673 673 parses the manifest if linkrev != changerev.
674 674 Returns rename info for fn at changerev rev.'''
675 675 if rev in rcache.setdefault(fn, {}):
676 676 return rcache[fn][rev]
677 677
678 678 try:
679 679 fctx = repo[rev].filectx(fn)
680 680 for ancestor in fctx.ancestors():
681 681 if ancestor.path() == fn:
682 682 renamed = ancestor.renamed()
683 683 rcache[fn][ancestor.rev()] = renamed and renamed[0]
684 684
685 685 renamed = fctx.renamed()
686 686 return renamed and renamed[0]
687 687 except error.LookupError:
688 688 return None
689 689
690 690 return getrenamed
691 691
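# Typical use of getrenamedfn (the file name is illustrative): resolve the
# rename source of a file at a given revision while walking the log.
def _examplegetrenamed(repo, rev):
    getrenamed = getrenamedfn(repo)
    # returns the copy source path, or a falsy value if fn was not renamed
    return getrenamed('path/to/file', rev)
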
692 692 def walkfilerevs(orig, repo, match, follow, revs, fncache):
693 693 if not isenabled(repo):
694 694 return orig(repo, match, follow, revs, fncache)
695 695
696 696 # remotefilelogs can't be walked in rev order, so throw.
697 697 # The caller will see the exception and walk the commit tree instead.
698 698 if not follow:
699 699 raise cmdutil.FileWalkError("Cannot walk via filelog")
700 700
701 701 wanted = set()
702 702 minrev, maxrev = min(revs), max(revs)
703 703
704 704 pctx = repo['.']
705 705 for filename in match.files():
706 706 if filename not in pctx:
707 707 raise error.Abort(_('cannot follow file not in parent '
708 708 'revision: "%s"') % filename)
709 709 fctx = pctx[filename]
710 710
711 711 linkrev = fctx.linkrev()
712 712 if linkrev >= minrev and linkrev <= maxrev:
713 713 fncache.setdefault(linkrev, []).append(filename)
714 714 wanted.add(linkrev)
715 715
716 716 for ancestor in fctx.ancestors():
717 717 linkrev = ancestor.linkrev()
718 718 if linkrev >= minrev and linkrev <= maxrev:
719 719 fncache.setdefault(linkrev, []).append(ancestor.path())
720 720 wanted.add(linkrev)
721 721
722 722 return wanted
723 723
724 724 def filelogrevset(orig, repo, subset, x):
725 725 """``filelog(pattern)``
726 726 Changesets connected to the specified filelog.
727 727
728 728 For performance reasons, ``filelog()`` does not show every changeset
729 729 that affects the requested file(s). See :hg:`help log` for details. For
730 730 a slower, more accurate result, use ``file()``.
731 731 """
732 732
733 733 if not isenabled(repo):
734 734 return orig(repo, subset, x)
735 735
736 736 # i18n: "filelog" is a keyword
737 737 pat = revset.getstring(x, _("filelog requires a pattern"))
738 738 m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
739 739 ctx=repo[None])
740 740 s = set()
741 741
742 742 if not match.patkind(pat):
743 743 # slow
744 744 for r in subset:
745 745 ctx = repo[r]
746 746 cfiles = ctx.files()
747 747 for f in m.files():
748 748 if f in cfiles:
749 749 s.add(ctx.rev())
750 750 break
751 751 else:
752 752 # partial
753 753 files = (f for f in repo[None] if m(f))
754 754 for f in files:
755 755 fctx = repo[None].filectx(f)
756 756 s.add(fctx.linkrev())
757 757 for actx in fctx.ancestors():
758 758 s.add(actx.linkrev())
759 759
760 760 return smartset.baseset([r for r in subset if r in s])
761 761
762 762 @command('gc', [], _('hg gc [REPO...]'), norepo=True)
763 763 def gc(ui, *args, **opts):
764 764 '''garbage collect the client and server filelog caches
765 765 '''
766 766 cachepaths = set()
767 767
768 768 # get the system client cache
769 769 systemcache = shallowutil.getcachepath(ui, allowempty=True)
770 770 if systemcache:
771 771 cachepaths.add(systemcache)
772 772
773 773 # get repo client and server cache
774 774 repopaths = []
775 775 pwd = ui.environ.get('PWD')
776 776 if pwd:
777 777 repopaths.append(pwd)
778 778
779 779 repopaths.extend(args)
780 780 repos = []
781 781 for repopath in repopaths:
782 782 try:
783 783 repo = hg.peer(ui, {}, repopath)
784 784 repos.append(repo)
785 785
786 786 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
787 787 if repocache:
788 788 cachepaths.add(repocache)
789 789 except error.RepoError:
790 790 pass
791 791
792 792 # gc client cache
793 793 for cachepath in cachepaths:
794 794 gcclient(ui, cachepath)
795 795
796 796 # gc server cache
797 797 for repo in repos:
798 798 remotefilelogserver.gcserver(ui, repo._repo)
799 799
800 800 def gcclient(ui, cachepath):
801 801 # get list of repos that use this cache
802 802 repospath = os.path.join(cachepath, 'repos')
803 803 if not os.path.exists(repospath):
804 804 ui.warn(_("no known cache at %s\n") % cachepath)
805 805 return
806 806
807 reposfile = open(repospath, 'r')
807 reposfile = open(repospath, 'rb')
808 808 repos = set([r[:-1] for r in reposfile.readlines()])
809 809 reposfile.close()
810 810
811 811 # build list of useful files
812 812 validrepos = []
813 813 keepkeys = set()
814 814
815 815 sharedcache = None
816 816 filesrepacked = False
817 817
818 818 count = 0
819 819 progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
820 820 total=len(repos))
821 821 for path in repos:
822 822 progress.update(count)
823 823 count += 1
824 824 try:
825 825 path = ui.expandpath(os.path.normpath(path))
826 826 except TypeError as e:
827 827 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
828 828 traceback.print_exc()
829 829 continue
830 830 try:
831 831 peer = hg.peer(ui, {}, path)
832 832 repo = peer._repo
833 833 except error.RepoError:
834 834 continue
835 835
836 836 validrepos.append(path)
837 837
838 838 # Protect against any repo or config changes that have happened since
839 839 # this repo was added to the repos file. We'd rather this loop succeed
840 840 # and too much be deleted, than the loop fail and nothing gets deleted.
841 841 if not isenabled(repo):
842 842 continue
843 843
844 844 if not util.safehasattr(repo, 'name'):
845 845 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
846 846 continue
847 847
848 848 # If garbage collection on repack and repack on hg gc are enabled
849 849 # then loose files are repacked and garbage collected.
850 850 # Otherwise regular garbage collection is performed.
851 851 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
852 852 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
853 853 if repackonhggc and gcrepack:
854 854 try:
855 855 repackmod.incrementalrepack(repo)
856 856 filesrepacked = True
857 857 continue
858 858 except (IOError, repackmod.RepackAlreadyRunning):
859 859 # If repack cannot be performed (e.g. not enough disk space),
860 860 # continue doing garbage collection of loose files without repacking
861 861 pass
862 862
863 863 reponame = repo.name
864 864 if not sharedcache:
865 865 sharedcache = repo.sharedstore
866 866
867 867 # Compute a keepset which is not garbage collected
868 868 def keyfn(fname, fnode):
869 869 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
870 870 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
871 871
872 872 progress.complete()
873 873
874 874 # write list of valid repos back
875 875 oldumask = os.umask(0o002)
876 876 try:
877 reposfile = open(repospath, 'w')
877 reposfile = open(repospath, 'wb')
878 878 reposfile.writelines([("%s\n" % r) for r in validrepos])
879 879 reposfile.close()
880 880 finally:
881 881 os.umask(oldumask)
882 882
883 883 # prune cache
884 884 if sharedcache is not None:
885 885 sharedcache.gc(keepkeys)
886 886 elif not filesrepacked:
887 887 ui.warn(_("warning: no valid repos in repofile\n"))
888 888
889 889 def log(orig, ui, repo, *pats, **opts):
890 890 if not isenabled(repo):
891 891 return orig(ui, repo, *pats, **opts)
892 892
893 893 follow = opts.get(r'follow')
894 894 revs = opts.get(r'rev')
895 895 if pats:
896 896 # Force slowpath for non-follow patterns and follows that start from
897 897 # non-working-copy-parent revs.
898 898 if not follow or revs:
899 899 # This forces the slowpath
900 900 opts[r'removed'] = True
901 901
902 902 # If this is a non-follow log without any revs specified, recommend that
903 903 # the user add -f to speed it up.
904 904 if not follow and not revs:
905 905 match, pats = scmutil.matchandpats(repo['.'], pats,
906 906 pycompat.byteskwargs(opts))
907 907 isfile = not match.anypats()
908 908 if isfile:
909 909 for file in match.files():
910 910 if not os.path.isfile(repo.wjoin(file)):
911 911 isfile = False
912 912 break
913 913
914 914 if isfile:
915 915 ui.warn(_("warning: file log can be slow on large repos - "
916 916 "use -f to speed it up\n"))
917 917
918 918 return orig(ui, repo, *pats, **opts)
919 919
920 920 def revdatelimit(ui, revset):
921 921 """Update revset so that only changesets no older than 'prefetchdays' days
922 922 are included. The default value is 14 days. If 'prefetchdays' is set
923 923 to zero or a negative value, the date restriction is not applied.
924 924 """
925 925 days = ui.configint('remotefilelog', 'prefetchdays')
926 926 if days > 0:
927 927 revset = '(%s) & date(-%s)' % (revset, days)
928 928 return revset
929 929
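# For example (illustrative revset), with the default prefetchdays of 14:
#   revdatelimit(ui, 'master + draft()') == '(master + draft()) & date(-14)'
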
930 930 def readytofetch(repo):
931 931 """Check that enough time has passed since the last background prefetch.
932 932 This only relates to prefetches after operations that change the working
933 933 copy parent. Default delay between background prefetches is 2 minutes.
934 934 """
935 935 timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
936 936 fname = repo.vfs.join('lastprefetch')
937 937
938 938 ready = False
939 939 with open(fname, 'a'):
940 940 # the with construct above is used to avoid race conditions
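# (append mode also creates 'lastprefetch' on first use, so the
# getmtime call below cannot fail on a missing file)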
941 941 modtime = os.path.getmtime(fname)
942 942 if (time.time() - modtime) > timeout:
943 943 os.utime(fname, None)
944 944 ready = True
945 945
946 946 return ready
947 947
948 948 def wcpprefetch(ui, repo, **kwargs):
949 949 """Prefetches in background revisions specified by bgprefetchrevs revset.
950 950 Does background repack if backgroundrepack flag is set in config.
951 951 """
952 952 shallow = isenabled(repo)
953 953 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
954 954 isready = readytofetch(repo)
955 955
956 956 if not (shallow and bgprefetchrevs and isready):
957 957 return
958 958
959 959 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
960 960 # update a revset with a date limit
961 961 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
962 962
963 963 def anon():
964 964 if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
965 965 return
966 966 repo.ranprefetch = True
967 967 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
968 968
969 969 repo._afterlock(anon)
970 970
971 971 def pull(orig, ui, repo, *pats, **opts):
972 972 result = orig(ui, repo, *pats, **opts)
973 973
974 974 if isenabled(repo):
975 975 # prefetch if it's configured
976 976 prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
977 977 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
978 978 bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
979 979
980 980 if prefetchrevset:
981 981 ui.status(_("prefetching file contents\n"))
982 982 revs = scmutil.revrange(repo, [prefetchrevset])
983 983 base = repo['.'].rev()
984 984 if bgprefetch:
985 985 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
986 986 else:
987 987 repo.prefetch(revs, base=base)
988 988 if bgrepack:
989 989 repackmod.backgroundrepack(repo, incremental=True)
990 990 elif bgrepack:
991 991 repackmod.backgroundrepack(repo, incremental=True)
992 992
993 993 return result
994 994
995 995 def exchangepull(orig, repo, remote, *args, **kwargs):
996 996 # Hook into the callstream/getbundle to insert bundle capabilities
997 997 # during a pull.
998 998 def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
999 999 **kwargs):
1000 1000 if not bundlecaps:
1001 1001 bundlecaps = set()
1002 1002 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
1003 1003 return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
1004 1004 **kwargs)
1005 1005
1006 1006 if util.safehasattr(remote, '_callstream'):
1007 1007 remote._localrepo = repo
1008 1008 elif util.safehasattr(remote, 'getbundle'):
1009 1009 extensions.wrapfunction(remote, 'getbundle', localgetbundle)
1010 1010
1011 1011 return orig(repo, remote, *args, **kwargs)
1012 1012
1013 1013 def _fileprefetchhook(repo, revs, match):
1014 1014 if isenabled(repo):
1015 1015 allfiles = []
1016 1016 for rev in revs:
1017 1017 if rev == nodemod.wdirrev or rev is None:
1018 1018 continue
1019 1019 ctx = repo[rev]
1020 1020 mf = ctx.manifest()
1021 1021 sparsematch = repo.maybesparsematch(ctx.rev())
1022 1022 for path in ctx.walk(match):
1023 1023 if path.endswith('/'):
1024 1024 # Tree manifest that's being excluded as part of narrow
1025 1025 continue
1026 1026 if (not sparsematch or sparsematch(path)) and path in mf:
1027 1027 allfiles.append((path, hex(mf[path])))
1028 1028 repo.fileservice.prefetch(allfiles)
1029 1029
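# Illustrative only (never called): every prefetch call site in this module
# builds the same request shape -- a list of (path, hex filenode) pairs
# handed to the repo's fileservice.
def _exampleprefetchpairs(repo, ctx, paths):
    mf = ctx.manifest()
    repo.fileservice.prefetch([(p, hex(mf[p])) for p in paths if p in mf])
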
1030 1030 @command('debugremotefilelog', [
1031 1031 ('d', 'decompress', None, _('decompress the filelog first')),
1032 1032 ], _('hg debugremotefilelog <path>'), norepo=True)
1033 1033 def debugremotefilelog(ui, path, **opts):
1034 1034 return debugcommands.debugremotefilelog(ui, path, **opts)
1035 1035
1036 1036 @command('verifyremotefilelog', [
1037 1037 ('d', 'decompress', None, _('decompress the filelogs first')),
1038 1038 ], _('hg verifyremotefilelog <directory>'), norepo=True)
1039 1039 def verifyremotefilelog(ui, path, **opts):
1040 1040 return debugcommands.verifyremotefilelog(ui, path, **opts)
1041 1041
1042 1042 @command('debugdatapack', [
1043 1043 ('', 'long', None, _('print the long hashes')),
1044 1044 ('', 'node', '', _('dump the contents of node'), 'NODE'),
1045 1045 ], _('hg debugdatapack <paths>'), norepo=True)
1046 1046 def debugdatapack(ui, *paths, **opts):
1047 1047 return debugcommands.debugdatapack(ui, *paths, **opts)
1048 1048
1049 1049 @command('debughistorypack', [
1050 1050 ], _('hg debughistorypack <path>'), norepo=True)
1051 1051 def debughistorypack(ui, path, **opts):
1052 1052 return debugcommands.debughistorypack(ui, path)
1053 1053
1054 1054 @command('debugkeepset', [
1055 1055 ], _('hg debugkeepset'))
1056 1056 def debugkeepset(ui, repo, **opts):
1057 1057 # The command is used to measure keepset computation time
1058 1058 def keyfn(fname, fnode):
1059 1059 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1060 1060 repackmod.keepset(repo, keyfn)
1061 1061 return
1062 1062
1063 1063 @command('debugwaitonrepack', [
1064 1064 ], _('hg debugwaitonrepack'))
1065 1065 def debugwaitonrepack(ui, repo, **opts):
1066 1066 return debugcommands.debugwaitonrepack(repo)
1067 1067
1068 1068 @command('debugwaitonprefetch', [
1069 1069 ], _('hg debugwaitonprefetch'))
1070 1070 def debugwaitonprefetch(ui, repo, **opts):
1071 1071 return debugcommands.debugwaitonprefetch(repo)
1072 1072
1073 1073 def resolveprefetchopts(ui, opts):
1074 1074 if not opts.get('rev'):
1075 1075 revset = ['.', 'draft()']
1076 1076
1077 1077 prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
1078 1078 if prefetchrevset:
1079 1079 revset.append('(%s)' % prefetchrevset)
1080 1080 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
1081 1081 if bgprefetchrevs:
1082 1082 revset.append('(%s)' % bgprefetchrevs)
1083 1083 revset = '+'.join(revset)
1084 1084
1085 1085 # update a revset with a date limit
1086 1086 revset = revdatelimit(ui, revset)
1087 1087
1088 1088 opts['rev'] = [revset]
1089 1089
1090 1090 if not opts.get('base'):
1091 1091 opts['base'] = None
1092 1092
1093 1093 return opts
1094 1094
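# Worked example (assuming pullprefetch and bgprefetchrevs are unset and no
# --rev was given): revset is ['.', 'draft()'], joined to '.+draft()', then
# date-limited, so opts['rev'] == ['(.+draft()) & date(-14)'] under the
# default 14-day prefetch window.
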
1095 1095 @command('prefetch', [
1096 1096 ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
1097 1097 ('', 'repack', False, _('run repack after prefetch')),
1098 1098 ('b', 'base', '', _("rev that is assumed to already be local")),
1099 1099 ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
1100 1100 def prefetch(ui, repo, *pats, **opts):
1101 1101 """prefetch file revisions from the server
1102 1102
1103 1103 Prefetches file revisions for the specified revs and stores them in the
1104 1104 local remotefilelog cache. If no rev is specified, the default rev is
1105 1105 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1106 1106 File names or patterns can be used to limit which files are downloaded.
1107 1107
1108 1108 Return 0 on success.
1109 1109 """
1110 1110 opts = pycompat.byteskwargs(opts)
1111 1111 if not isenabled(repo):
1112 1112 raise error.Abort(_("repo is not shallow"))
1113 1113
1114 1114 opts = resolveprefetchopts(ui, opts)
1115 1115 revs = scmutil.revrange(repo, opts.get('rev'))
1116 1116 repo.prefetch(revs, opts.get('base'), pats, opts)
1117 1117
1118 1118 # Run repack in background
1119 1119 if opts.get('repack'):
1120 1120 repackmod.backgroundrepack(repo, incremental=True)
1121 1121
1122 1122 @command('repack', [
1123 1123 ('', 'background', None, _('run in a background process'), None),
1124 1124 ('', 'incremental', None, _('do an incremental repack'), None),
1125 1125 ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
1126 1126 ], _('hg repack [OPTIONS]'))
1127 1127 def repack_(ui, repo, *pats, **opts):
1128 1128 if opts.get(r'background'):
1129 1129 repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
1130 1130 packsonly=opts.get(r'packsonly', False))
1131 1131 return
1132 1132
1133 1133 options = {'packsonly': opts.get(r'packsonly')}
1134 1134
1135 1135 try:
1136 1136 if opts.get(r'incremental'):
1137 1137 repackmod.incrementalrepack(repo, options=options)
1138 1138 else:
1139 1139 repackmod.fullrepack(repo, options=options)
1140 1140 except repackmod.RepackAlreadyRunning as ex:
1141 1141 # Don't propagate the exception if the repack is already in
1142 1142 # progress, since we want the command to exit 0.
1143 1143 repo.ui.warn('%s\n' % ex)