remotefilelog: add a developer option to wait for background processes...
marmoute
r44298:63bb6dc6 stable
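The functional change is small: __init__.py registers a new `devel.remotefilelog.bg-wait` config item, and repack.py passes `record_wait=repo.ui.atexit` to `procutil.runbgcommand` when that option is set, so a test run can wait for the spawned background repack to finish before Mercurial exits. Below is a minimal, self-contained Python sketch of that pattern, not the extension's actual code; `spawn_background` and `background_repack` are hypothetical stand-ins for `procutil.runbgcommand` and `repack.backgroundrepack`, and the standard-library `atexit.register` stands in for `repo.ui.atexit`.

import atexit
import subprocess


def spawn_background(cmd, record_wait=None):
    # Start `cmd` without blocking the caller (the role runbgcommand plays in
    # the diff below); optionally hand the waiter back to the caller.
    proc = subprocess.Popen(cmd)
    if record_wait is not None:
        # The recorded waiter runs before the main process exits, so the
        # background work is guaranteed to be finished by then.
        record_wait(proc.wait)
    return proc


def background_repack(cmd, bg_wait=False):
    # Mirrors the new code path: only pass record_wait when the developer
    # option (devel.remotefilelog.bg-wait in the diff) is enabled.
    kwargs = {}
    if bg_wait:
        kwargs['record_wait'] = atexit.register
    return spawn_background(cmd, **kwargs)

With `bg_wait=True`, `proc.wait` is invoked at interpreter exit, which is the determinism the developer option is meant to buy for the test suite.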
@@ -1,1302 +1,1303 b''
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 minimum number of pack files > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list for specifying the lower bound of
52 52 each generation of the data pack files. For example, list ['100MB','1MB']
53 53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
54 54 1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, minimum number of pack files > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list for specifying the lower bound of
70 70 each generation of the history pack files. For example, list [
71 71 '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
72 72 0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning``: message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial.pycompat import open
136 136 from mercurial import (
137 137 changegroup,
138 138 changelog,
139 139 cmdutil,
140 140 commands,
141 141 configitems,
142 142 context,
143 143 copies,
144 144 debugcommands as hgdebugcommands,
145 145 dispatch,
146 146 error,
147 147 exchange,
148 148 extensions,
149 149 hg,
150 150 localrepo,
151 151 match,
152 152 merge,
153 153 node as nodemod,
154 154 patch,
155 155 pycompat,
156 156 registrar,
157 157 repair,
158 158 repoview,
159 159 revset,
160 160 scmutil,
161 161 smartset,
162 162 streamclone,
163 163 util,
164 164 )
165 165 from . import (
166 166 constants,
167 167 debugcommands,
168 168 fileserverclient,
169 169 remotefilectx,
170 170 remotefilelog,
171 171 remotefilelogserver,
172 172 repack as repackmod,
173 173 shallowbundle,
174 174 shallowrepo,
175 175 shallowstore,
176 176 shallowutil,
177 177 shallowverifier,
178 178 )
179 179
180 180 # ensures debug commands are registered
181 181 hgdebugcommands.command
182 182
183 183 cmdtable = {}
184 184 command = registrar.command(cmdtable)
185 185
186 186 configtable = {}
187 187 configitem = registrar.configitem(configtable)
188 188
189 189 configitem(b'remotefilelog', b'debug', default=False)
190 190
191 191 configitem(b'remotefilelog', b'reponame', default=b'')
192 192 configitem(b'remotefilelog', b'cachepath', default=None)
193 193 configitem(b'remotefilelog', b'cachegroup', default=None)
194 194 configitem(b'remotefilelog', b'cacheprocess', default=None)
195 195 configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
196 196 configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")
197 197
198 198 configitem(
199 199 b'remotefilelog',
200 200 b'fallbackpath',
201 201 default=configitems.dynamicdefault,
202 202 alias=[(b'remotefilelog', b'fallbackrepo')],
203 203 )
204 204
205 205 configitem(b'remotefilelog', b'validatecachelog', default=None)
206 206 configitem(b'remotefilelog', b'validatecache', default=b'on')
207 207 configitem(b'remotefilelog', b'server', default=None)
208 208 configitem(b'remotefilelog', b'servercachepath', default=None)
209 209 configitem(b"remotefilelog", b"serverexpiration", default=30)
210 210 configitem(b'remotefilelog', b'backgroundrepack', default=False)
211 211 configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
212 212 configitem(b'remotefilelog', b'pullprefetch', default=None)
213 213 configitem(b'remotefilelog', b'backgroundprefetch', default=False)
214 214 configitem(b'remotefilelog', b'prefetchdelay', default=120)
215 215 configitem(b'remotefilelog', b'prefetchdays', default=14)
216 216
217 217 configitem(b'remotefilelog', b'getfilesstep', default=10000)
218 218 configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
219 219 configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
220 220 configitem(b'remotefilelog', b'fetchwarning', default=b'')
221 221
222 222 configitem(b'remotefilelog', b'includepattern', default=None)
223 223 configitem(b'remotefilelog', b'excludepattern', default=None)
224 224
225 225 configitem(b'remotefilelog', b'gcrepack', default=False)
226 226 configitem(b'remotefilelog', b'repackonhggc', default=False)
227 227 configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)
228 228
229 229 configitem(b'packs', b'maxpacksize', default=0)
230 230 configitem(b'packs', b'maxchainlen', default=1000)
231 231
232 232 configitem(b'devel', b'remotefilelog.ensurestart', default=False)
233 configitem(b'devel', b'remotefilelog.bg-wait', default=False)
233 234
234 235 # default TTL limit is 30 days
235 236 _defaultlimit = 60 * 60 * 24 * 30
236 237 configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)
237 238
238 239 configitem(b'remotefilelog', b'data.gencountlimit', default=2),
239 240 configitem(
240 241 b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
241 242 )
242 243 configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
243 244 configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
244 245 configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')
245 246
246 247 configitem(b'remotefilelog', b'history.gencountlimit', default=2),
247 248 configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
248 249 configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
249 250 configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
250 251 configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
251 252
252 253 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
253 254 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
254 255 # be specifying the version(s) of Mercurial they are tested with, or
255 256 # leave the attribute unspecified.
256 257 testedwith = b'ships-with-hg-core'
257 258
258 259 repoclass = localrepo.localrepository
259 260 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
260 261
261 262 isenabled = shallowutil.isenabled
262 263
263 264
264 265 def uisetup(ui):
265 266 """Wraps user facing Mercurial commands to swap them out with shallow
266 267 versions.
267 268 """
268 269 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
269 270
270 271 entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
271 272 entry[1].append(
272 273 (
273 274 b'',
274 275 b'shallow',
275 276 None,
276 277 _(b"create a shallow clone which uses remote file history"),
277 278 )
278 279 )
279 280
280 281 extensions.wrapcommand(
281 282 commands.table, b'debugindex', debugcommands.debugindex
282 283 )
283 284 extensions.wrapcommand(
284 285 commands.table, b'debugindexdot', debugcommands.debugindexdot
285 286 )
286 287 extensions.wrapcommand(commands.table, b'log', log)
287 288 extensions.wrapcommand(commands.table, b'pull', pull)
288 289
289 290 # Prevent 'hg manifest --all'
290 291 def _manifest(orig, ui, repo, *args, **opts):
291 292 if isenabled(repo) and opts.get(r'all'):
292 293 raise error.Abort(_(b"--all is not supported in a shallow repo"))
293 294
294 295 return orig(ui, repo, *args, **opts)
295 296
296 297 extensions.wrapcommand(commands.table, b"manifest", _manifest)
297 298
298 299 # Wrap remotefilelog with lfs code
299 300 def _lfsloaded(loaded=False):
300 301 lfsmod = None
301 302 try:
302 303 lfsmod = extensions.find(b'lfs')
303 304 except KeyError:
304 305 pass
305 306 if lfsmod:
306 307 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
307 308 fileserverclient._lfsmod = lfsmod
308 309
309 310 extensions.afterloaded(b'lfs', _lfsloaded)
310 311
311 312 # debugdata needs remotefilelog.len to work
312 313 extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)
313 314
314 315 changegroup.cgpacker = shallowbundle.shallowcg1packer
315 316
316 317 extensions.wrapfunction(
317 318 changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
318 319 )
319 320 extensions.wrapfunction(
320 321 changegroup, b'makechangegroup', shallowbundle.makechangegroup
321 322 )
322 323 extensions.wrapfunction(localrepo, b'makestore', storewrapper)
323 324 extensions.wrapfunction(exchange, b'pull', exchangepull)
324 325 extensions.wrapfunction(merge, b'applyupdates', applyupdates)
325 326 extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
326 327 extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
327 328 extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
328 329 extensions.wrapfunction(
329 330 copies, b'_computeforwardmissing', computeforwardmissing
330 331 )
331 332 extensions.wrapfunction(dispatch, b'runcommand', runcommand)
332 333 extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
333 334 extensions.wrapfunction(context.changectx, b'filectx', filectx)
334 335 extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
335 336 extensions.wrapfunction(patch, b'trydiff', trydiff)
336 337 extensions.wrapfunction(hg, b'verify', _verify)
337 338 scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)
338 339
339 340 # disappointing hacks below
340 341 extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
341 342 extensions.wrapfunction(revset, b'filelog', filelogrevset)
342 343 revset.symbols[b'filelog'] = revset.filelog
343 344 extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
344 345
345 346
346 347 def cloneshallow(orig, ui, repo, *args, **opts):
347 348 if opts.get(r'shallow'):
348 349 repos = []
349 350
350 351 def pull_shallow(orig, self, *args, **kwargs):
351 352 if not isenabled(self):
352 353 repos.append(self.unfiltered())
353 354 # set up the client hooks so the post-clone update works
354 355 setupclient(self.ui, self.unfiltered())
355 356
356 357 # setupclient fixed the class on the repo itself
357 358 # but we also need to fix it on the repoview
358 359 if isinstance(self, repoview.repoview):
359 360 self.__class__.__bases__ = (
360 361 self.__class__.__bases__[0],
361 362 self.unfiltered().__class__,
362 363 )
363 364 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
364 365 self._writerequirements()
365 366
366 367 # Since setupclient hadn't been called, exchange.pull was not
367 368 # wrapped. So we need to manually invoke our version of it.
368 369 return exchangepull(orig, self, *args, **kwargs)
369 370 else:
370 371 return orig(self, *args, **kwargs)
371 372
372 373 extensions.wrapfunction(exchange, b'pull', pull_shallow)
373 374
374 375 # Wrap the stream logic to add requirements and to pass include/exclude
375 376 # patterns around.
376 377 def setup_streamout(repo, remote):
377 378 # Replace remote.stream_out with a version that sends file
378 379 # patterns.
379 380 def stream_out_shallow(orig):
380 381 caps = remote.capabilities()
381 382 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
382 383 opts = {}
383 384 if repo.includepattern:
384 385 opts[r'includepattern'] = b'\0'.join(
385 386 repo.includepattern
386 387 )
387 388 if repo.excludepattern:
388 389 opts[r'excludepattern'] = b'\0'.join(
389 390 repo.excludepattern
390 391 )
391 392 return remote._callstream(b'stream_out_shallow', **opts)
392 393 else:
393 394 return orig()
394 395
395 396 extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)
396 397
397 398 def stream_wrap(orig, op):
398 399 setup_streamout(op.repo, op.remote)
399 400 return orig(op)
400 401
401 402 extensions.wrapfunction(
402 403 streamclone, b'maybeperformlegacystreamclone', stream_wrap
403 404 )
404 405
405 406 def canperformstreamclone(orig, pullop, bundle2=False):
406 407 # remotefilelog is currently incompatible with the
407 408 # bundle2 flavor of streamclones, so force us to use
408 409 # v1 instead.
409 410 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
410 411 pullop.remotebundle2caps[b'stream'] = [
411 412 c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
412 413 ]
413 414 if bundle2:
414 415 return False, None
415 416 supported, requirements = orig(pullop, bundle2=bundle2)
416 417 if requirements is not None:
417 418 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
418 419 return supported, requirements
419 420
420 421 extensions.wrapfunction(
421 422 streamclone, b'canperformstreamclone', canperformstreamclone
422 423 )
423 424
424 425 try:
425 426 orig(ui, repo, *args, **opts)
426 427 finally:
427 428 if opts.get(r'shallow'):
428 429 for r in repos:
429 430 if util.safehasattr(r, b'fileservice'):
430 431 r.fileservice.close()
431 432
432 433
433 434 def debugdatashallow(orig, *args, **kwds):
434 435 oldlen = remotefilelog.remotefilelog.__len__
435 436 try:
436 437 remotefilelog.remotefilelog.__len__ = lambda x: 1
437 438 return orig(*args, **kwds)
438 439 finally:
439 440 remotefilelog.remotefilelog.__len__ = oldlen
440 441
441 442
442 443 def reposetup(ui, repo):
443 444 if not repo.local():
444 445 return
445 446
446 447 # put here intentionally because it doesn't work in uisetup
447 448 ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
448 449 ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)
449 450
450 451 isserverenabled = ui.configbool(b'remotefilelog', b'server')
451 452 isshallowclient = isenabled(repo)
452 453
453 454 if isserverenabled and isshallowclient:
454 455 raise RuntimeError(b"Cannot be both a server and shallow client.")
455 456
456 457 if isshallowclient:
457 458 setupclient(ui, repo)
458 459
459 460 if isserverenabled:
460 461 remotefilelogserver.setupserver(ui, repo)
461 462
462 463
463 464 def setupclient(ui, repo):
464 465 if not isinstance(repo, localrepo.localrepository):
465 466 return
466 467
467 468 # Even clients get the server setup since they need to have the
468 469 # wireprotocol endpoints registered.
469 470 remotefilelogserver.onetimesetup(ui)
470 471 onetimeclientsetup(ui)
471 472
472 473 shallowrepo.wraprepo(repo)
473 474 repo.store = shallowstore.wrapstore(repo.store)
474 475
475 476
476 477 def storewrapper(orig, requirements, path, vfstype):
477 478 s = orig(requirements, path, vfstype)
478 479 if constants.SHALLOWREPO_REQUIREMENT in requirements:
479 480 s = shallowstore.wrapstore(s)
480 481
481 482 return s
482 483
483 484
484 485 # prefetch files before update
485 486 def applyupdates(
486 487 orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
487 488 ):
488 489 if isenabled(repo):
489 490 manifest = mctx.manifest()
490 491 files = []
491 492 for f, args, msg in actions[b'g']:
492 493 files.append((f, hex(manifest[f])))
493 494 # batch fetch the needed files from the server
494 495 repo.fileservice.prefetch(files)
495 496 return orig(
496 497 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels
497 498 )
498 499
499 500
500 501 # Prefetch merge checkunknownfiles
501 502 def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
502 503 if isenabled(repo):
503 504 files = []
504 505 sparsematch = repo.maybesparsematch(mctx.rev())
505 506 for f, (m, actionargs, msg) in pycompat.iteritems(actions):
506 507 if sparsematch and not sparsematch(f):
507 508 continue
508 509 if m in (b'c', b'dc', b'cm'):
509 510 files.append((f, hex(mctx.filenode(f))))
510 511 elif m == b'dg':
511 512 f2 = actionargs[0]
512 513 files.append((f2, hex(mctx.filenode(f2))))
513 514 # batch fetch the needed files from the server
514 515 repo.fileservice.prefetch(files)
515 516 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
516 517
517 518
518 519 # Prefetch files before status attempts to look at their size and contents
519 520 def checklookup(orig, self, files):
520 521 repo = self._repo
521 522 if isenabled(repo):
522 523 prefetchfiles = []
523 524 for parent in self._parents:
524 525 for f in files:
525 526 if f in parent:
526 527 prefetchfiles.append((f, hex(parent.filenode(f))))
527 528 # batch fetch the needed files from the server
528 529 repo.fileservice.prefetch(prefetchfiles)
529 530 return orig(self, files)
530 531
531 532
532 533 # Prefetch files for the logic that compares added and removed files for renames
533 534 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
534 535 if isenabled(repo):
535 536 files = []
536 537 pmf = repo[b'.'].manifest()
537 538 for f in removed:
538 539 if f in pmf:
539 540 files.append((f, hex(pmf[f])))
540 541 # batch fetch the needed files from the server
541 542 repo.fileservice.prefetch(files)
542 543 return orig(repo, matcher, added, removed, *args, **kwargs)
543 544
544 545
545 546 # prefetch files before pathcopies check
546 547 def computeforwardmissing(orig, a, b, match=None):
547 548 missing = orig(a, b, match=match)
548 549 repo = a._repo
549 550 if isenabled(repo):
550 551 mb = b.manifest()
551 552
552 553 files = []
553 554 sparsematch = repo.maybesparsematch(b.rev())
554 555 if sparsematch:
555 556 sparsemissing = set()
556 557 for f in missing:
557 558 if sparsematch(f):
558 559 files.append((f, hex(mb[f])))
559 560 sparsemissing.add(f)
560 561 missing = sparsemissing
561 562
562 563 # batch fetch the needed files from the server
563 564 repo.fileservice.prefetch(files)
564 565 return missing
565 566
566 567
567 568 # close cache miss server connection after the command has finished
568 569 def runcommand(orig, lui, repo, *args, **kwargs):
569 570 fileservice = None
570 571 # repo can be None when running in chg:
571 572 # - at startup, reposetup was called because serve is not norepo
572 573 # - a norepo command like "help" is called
573 574 if repo and isenabled(repo):
574 575 fileservice = repo.fileservice
575 576 try:
576 577 return orig(lui, repo, *args, **kwargs)
577 578 finally:
578 579 if fileservice:
579 580 fileservice.close()
580 581
581 582
582 583 # prevent strip from stripping remotefilelogs
583 584 def _collectbrokencsets(orig, repo, files, striprev):
584 585 if isenabled(repo):
585 586 files = list([f for f in files if not repo.shallowmatch(f)])
586 587 return orig(repo, files, striprev)
587 588
588 589
589 590 # changectx wrappers
590 591 def filectx(orig, self, path, fileid=None, filelog=None):
591 592 if fileid is None:
592 593 fileid = self.filenode(path)
593 594 if isenabled(self._repo) and self._repo.shallowmatch(path):
594 595 return remotefilectx.remotefilectx(
595 596 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
596 597 )
597 598 return orig(self, path, fileid=fileid, filelog=filelog)
598 599
599 600
600 601 def workingfilectx(orig, self, path, filelog=None):
601 602 if isenabled(self._repo) and self._repo.shallowmatch(path):
602 603 return remotefilectx.remoteworkingfilectx(
603 604 self._repo, path, workingctx=self, filelog=filelog
604 605 )
605 606 return orig(self, path, filelog=filelog)
606 607
607 608
608 609 # prefetch required revisions before a diff
609 610 def trydiff(
610 611 orig,
611 612 repo,
612 613 revs,
613 614 ctx1,
614 615 ctx2,
615 616 modified,
616 617 added,
617 618 removed,
618 619 copy,
619 620 getfilectx,
620 621 *args,
621 622 **kwargs
622 623 ):
623 624 if isenabled(repo):
624 625 prefetch = []
625 626 mf1 = ctx1.manifest()
626 627 for fname in modified + added + removed:
627 628 if fname in mf1:
628 629 fnode = getfilectx(fname, ctx1).filenode()
629 630 # fnode can be None if it's an edited working ctx file
630 631 if fnode:
631 632 prefetch.append((fname, hex(fnode)))
632 633 if fname not in removed:
633 634 fnode = getfilectx(fname, ctx2).filenode()
634 635 if fnode:
635 636 prefetch.append((fname, hex(fnode)))
636 637
637 638 repo.fileservice.prefetch(prefetch)
638 639
639 640 return orig(
640 641 repo,
641 642 revs,
642 643 ctx1,
643 644 ctx2,
644 645 modified,
645 646 added,
646 647 removed,
647 648 copy,
648 649 getfilectx,
649 650 *args,
650 651 **kwargs
651 652 )
652 653
653 654
654 655 # Prevent verify from processing files
655 656 # a stub for mercurial.hg.verify()
656 657 def _verify(orig, repo, level=None):
657 658 lock = repo.lock()
658 659 try:
659 660 return shallowverifier.shallowverifier(repo).verify()
660 661 finally:
661 662 lock.release()
662 663
663 664
664 665 clientonetime = False
665 666
666 667
667 668 def onetimeclientsetup(ui):
668 669 global clientonetime
669 670 if clientonetime:
670 671 return
671 672 clientonetime = True
672 673
673 674 # Don't commit filelogs until we know the commit hash, since the hash
674 675 # is present in the filelog blob.
675 676 # This violates Mercurial's filelog->manifest->changelog write order,
676 677 # but is generally fine for client repos.
677 678 pendingfilecommits = []
678 679
679 680 def addrawrevision(
680 681 orig,
681 682 self,
682 683 rawtext,
683 684 transaction,
684 685 link,
685 686 p1,
686 687 p2,
687 688 node,
688 689 flags,
689 690 cachedelta=None,
690 691 _metatuple=None,
691 692 ):
692 693 if isinstance(link, int):
693 694 pendingfilecommits.append(
694 695 (
695 696 self,
696 697 rawtext,
697 698 transaction,
698 699 link,
699 700 p1,
700 701 p2,
701 702 node,
702 703 flags,
703 704 cachedelta,
704 705 _metatuple,
705 706 )
706 707 )
707 708 return node
708 709 else:
709 710 return orig(
710 711 self,
711 712 rawtext,
712 713 transaction,
713 714 link,
714 715 p1,
715 716 p2,
716 717 node,
717 718 flags,
718 719 cachedelta,
719 720 _metatuple=_metatuple,
720 721 )
721 722
722 723 extensions.wrapfunction(
723 724 remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
724 725 )
725 726
726 727 def changelogadd(orig, self, *args):
727 728 oldlen = len(self)
728 729 node = orig(self, *args)
729 730 newlen = len(self)
730 731 if oldlen != newlen:
731 732 for oldargs in pendingfilecommits:
732 733 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
733 734 linknode = self.node(link)
734 735 if linknode == node:
735 736 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
736 737 else:
737 738 raise error.ProgrammingError(
738 739 b'pending multiple integer revisions are not supported'
739 740 )
740 741 else:
741 742 # "link" is actually wrong here (it is set to len(changelog))
742 743 # if changelog remains unchanged, skip writing file revisions
743 744 # but still do a sanity check about pending multiple revisions
744 745 if len(set(x[3] for x in pendingfilecommits)) > 1:
745 746 raise error.ProgrammingError(
746 747 b'pending multiple integer revisions are not supported'
747 748 )
748 749 del pendingfilecommits[:]
749 750 return node
750 751
751 752 extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
752 753
753 754
754 755 def getrenamedfn(orig, repo, endrev=None):
755 756 if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
756 757 return orig(repo, endrev)
757 758
758 759 rcache = {}
759 760
760 761 def getrenamed(fn, rev):
761 762 '''looks up all renames for a file (up to endrev) the first
762 763 time the file is given. It indexes on the changerev and only
763 764 parses the manifest if linkrev != changerev.
764 765 Returns rename info for fn at changerev rev.'''
765 766 if rev in rcache.setdefault(fn, {}):
766 767 return rcache[fn][rev]
767 768
768 769 try:
769 770 fctx = repo[rev].filectx(fn)
770 771 for ancestor in fctx.ancestors():
771 772 if ancestor.path() == fn:
772 773 renamed = ancestor.renamed()
773 774 rcache[fn][ancestor.rev()] = renamed and renamed[0]
774 775
775 776 renamed = fctx.renamed()
776 777 return renamed and renamed[0]
777 778 except error.LookupError:
778 779 return None
779 780
780 781 return getrenamed
781 782
782 783
783 784 def walkfilerevs(orig, repo, match, follow, revs, fncache):
784 785 if not isenabled(repo):
785 786 return orig(repo, match, follow, revs, fncache)
786 787
787 788 # remotefilelogs can't be walked in rev order, so throw.
788 789 # The caller will see the exception and walk the commit tree instead.
789 790 if not follow:
790 791 raise cmdutil.FileWalkError(b"Cannot walk via filelog")
791 792
792 793 wanted = set()
793 794 minrev, maxrev = min(revs), max(revs)
794 795
795 796 pctx = repo[b'.']
796 797 for filename in match.files():
797 798 if filename not in pctx:
798 799 raise error.Abort(
799 800 _(b'cannot follow file not in parent revision: "%s"') % filename
800 801 )
801 802 fctx = pctx[filename]
802 803
803 804 linkrev = fctx.linkrev()
804 805 if linkrev >= minrev and linkrev <= maxrev:
805 806 fncache.setdefault(linkrev, []).append(filename)
806 807 wanted.add(linkrev)
807 808
808 809 for ancestor in fctx.ancestors():
809 810 linkrev = ancestor.linkrev()
810 811 if linkrev >= minrev and linkrev <= maxrev:
811 812 fncache.setdefault(linkrev, []).append(ancestor.path())
812 813 wanted.add(linkrev)
813 814
814 815 return wanted
815 816
816 817
817 818 def filelogrevset(orig, repo, subset, x):
818 819 """``filelog(pattern)``
819 820 Changesets connected to the specified filelog.
820 821
821 822 For performance reasons, ``filelog()`` does not show every changeset
822 823 that affects the requested file(s). See :hg:`help log` for details. For
823 824 a slower, more accurate result, use ``file()``.
824 825 """
825 826
826 827 if not isenabled(repo):
827 828 return orig(repo, subset, x)
828 829
829 830 # i18n: "filelog" is a keyword
830 831 pat = revset.getstring(x, _(b"filelog requires a pattern"))
831 832 m = match.match(
832 833 repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
833 834 )
834 835 s = set()
835 836
836 837 if not match.patkind(pat):
837 838 # slow
838 839 for r in subset:
839 840 ctx = repo[r]
840 841 cfiles = ctx.files()
841 842 for f in m.files():
842 843 if f in cfiles:
843 844 s.add(ctx.rev())
844 845 break
845 846 else:
846 847 # partial
847 848 files = (f for f in repo[None] if m(f))
848 849 for f in files:
849 850 fctx = repo[None].filectx(f)
850 851 s.add(fctx.linkrev())
851 852 for actx in fctx.ancestors():
852 853 s.add(actx.linkrev())
853 854
854 855 return smartset.baseset([r for r in subset if r in s])
855 856
856 857
857 858 @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
858 859 def gc(ui, *args, **opts):
859 860 '''garbage collect the client and server filelog caches
860 861 '''
861 862 cachepaths = set()
862 863
863 864 # get the system client cache
864 865 systemcache = shallowutil.getcachepath(ui, allowempty=True)
865 866 if systemcache:
866 867 cachepaths.add(systemcache)
867 868
868 869 # get repo client and server cache
869 870 repopaths = []
870 871 pwd = ui.environ.get(b'PWD')
871 872 if pwd:
872 873 repopaths.append(pwd)
873 874
874 875 repopaths.extend(args)
875 876 repos = []
876 877 for repopath in repopaths:
877 878 try:
878 879 repo = hg.peer(ui, {}, repopath)
879 880 repos.append(repo)
880 881
881 882 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
882 883 if repocache:
883 884 cachepaths.add(repocache)
884 885 except error.RepoError:
885 886 pass
886 887
887 888 # gc client cache
888 889 for cachepath in cachepaths:
889 890 gcclient(ui, cachepath)
890 891
891 892 # gc server cache
892 893 for repo in repos:
893 894 remotefilelogserver.gcserver(ui, repo._repo)
894 895
895 896
896 897 def gcclient(ui, cachepath):
897 898 # get list of repos that use this cache
898 899 repospath = os.path.join(cachepath, b'repos')
899 900 if not os.path.exists(repospath):
900 901 ui.warn(_(b"no known cache at %s\n") % cachepath)
901 902 return
902 903
903 904 reposfile = open(repospath, b'rb')
904 905 repos = {r[:-1] for r in reposfile.readlines()}
905 906 reposfile.close()
906 907
907 908 # build list of useful files
908 909 validrepos = []
909 910 keepkeys = set()
910 911
911 912 sharedcache = None
912 913 filesrepacked = False
913 914
914 915 count = 0
915 916 progress = ui.makeprogress(
916 917 _(b"analyzing repositories"), unit=b"repos", total=len(repos)
917 918 )
918 919 for path in repos:
919 920 progress.update(count)
920 921 count += 1
921 922 try:
922 923 path = ui.expandpath(os.path.normpath(path))
923 924 except TypeError as e:
924 925 ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
925 926 traceback.print_exc()
926 927 continue
927 928 try:
928 929 peer = hg.peer(ui, {}, path)
929 930 repo = peer._repo
930 931 except error.RepoError:
931 932 continue
932 933
933 934 validrepos.append(path)
934 935
935 936 # Protect against any repo or config changes that have happened since
936 937 # this repo was added to the repos file. We'd rather this loop succeed
937 938 # and too much be deleted, than the loop fail and nothing gets deleted.
938 939 if not isenabled(repo):
939 940 continue
940 941
941 942 if not util.safehasattr(repo, b'name'):
942 943 ui.warn(
943 944 _(b"repo %s is a misconfigured remotefilelog repo\n") % path
944 945 )
945 946 continue
946 947
947 948 # If garbage collection on repack and repack on hg gc are enabled
948 949 # then loose files are repacked and garbage collected.
949 950 # Otherwise regular garbage collection is performed.
950 951 repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
951 952 gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
952 953 if repackonhggc and gcrepack:
953 954 try:
954 955 repackmod.incrementalrepack(repo)
955 956 filesrepacked = True
956 957 continue
957 958 except (IOError, repackmod.RepackAlreadyRunning):
958 959 # If repack cannot be performed due to not enough disk space
959 960 # continue doing garbage collection of loose files w/o repack
960 961 pass
961 962
962 963 reponame = repo.name
963 964 if not sharedcache:
964 965 sharedcache = repo.sharedstore
965 966
966 967 # Compute a keepset which is not garbage collected
967 968 def keyfn(fname, fnode):
968 969 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
969 970
970 971 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
971 972
972 973 progress.complete()
973 974
974 975 # write list of valid repos back
975 976 oldumask = os.umask(0o002)
976 977 try:
977 978 reposfile = open(repospath, b'wb')
978 979 reposfile.writelines([(b"%s\n" % r) for r in validrepos])
979 980 reposfile.close()
980 981 finally:
981 982 os.umask(oldumask)
982 983
983 984 # prune cache
984 985 if sharedcache is not None:
985 986 sharedcache.gc(keepkeys)
986 987 elif not filesrepacked:
987 988 ui.warn(_(b"warning: no valid repos in repofile\n"))
988 989
989 990
990 991 def log(orig, ui, repo, *pats, **opts):
991 992 if not isenabled(repo):
992 993 return orig(ui, repo, *pats, **opts)
993 994
994 995 follow = opts.get(r'follow')
995 996 revs = opts.get(r'rev')
996 997 if pats:
997 998 # Force slowpath for non-follow patterns and follows that start from
998 999 # non-working-copy-parent revs.
999 1000 if not follow or revs:
1000 1001 # This forces the slowpath
1001 1002 opts[r'removed'] = True
1002 1003
1003 1004 # If this is a non-follow log without any revs specified, recommend that
1004 1005 # the user add -f to speed it up.
1005 1006 if not follow and not revs:
1006 1007 match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
1007 1008 isfile = not match.anypats()
1008 1009 if isfile:
1009 1010 for file in match.files():
1010 1011 if not os.path.isfile(repo.wjoin(file)):
1011 1012 isfile = False
1012 1013 break
1013 1014
1014 1015 if isfile:
1015 1016 ui.warn(
1016 1017 _(
1017 1018 b"warning: file log can be slow on large repos - "
1018 1019 + b"use -f to speed it up\n"
1019 1020 )
1020 1021 )
1021 1022
1022 1023 return orig(ui, repo, *pats, **opts)
1023 1024
1024 1025
1025 1026 def revdatelimit(ui, revset):
1026 1027 """Update revset so that only changesets no older than 'prefetchdays' days
1027 1028 are included. The default value is set to 14 days. If 'prefetchdays' is set
1028 1029 to zero or a negative value, the date restriction is not applied.
1029 1030 """
1030 1031 days = ui.configint(b'remotefilelog', b'prefetchdays')
1031 1032 if days > 0:
1032 1033 revset = b'(%s) & date(-%s)' % (revset, days)
1033 1034 return revset
1034 1035
1035 1036
1036 1037 def readytofetch(repo):
1037 1038 """Check that enough time has passed since the last background prefetch.
1038 1039 This only relates to prefetches after operations that change the working
1039 1040 copy parent. Default delay between background prefetches is 2 minutes.
1040 1041 """
1041 1042 timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
1042 1043 fname = repo.vfs.join(b'lastprefetch')
1043 1044
1044 1045 ready = False
1045 1046 with open(fname, b'a'):
1046 1047 # the with construct above is used to avoid race conditions
1047 1048 modtime = os.path.getmtime(fname)
1048 1049 if (time.time() - modtime) > timeout:
1049 1050 os.utime(fname, None)
1050 1051 ready = True
1051 1052
1052 1053 return ready
1053 1054
1054 1055
1055 1056 def wcpprefetch(ui, repo, **kwargs):
1056 1057 """Prefetches in the background the revisions specified by the bgprefetchrevs revset.
1057 1058 Does a background repack if the backgroundrepack flag is set in the config.
1058 1059 """
1059 1060 shallow = isenabled(repo)
1060 1061 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
1061 1062 isready = readytofetch(repo)
1062 1063
1063 1064 if not (shallow and bgprefetchrevs and isready):
1064 1065 return
1065 1066
1066 1067 bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
1067 1068 # update a revset with a date limit
1068 1069 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
1069 1070
1070 1071 def anon():
1071 1072 if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
1072 1073 return
1073 1074 repo.ranprefetch = True
1074 1075 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
1075 1076
1076 1077 repo._afterlock(anon)
1077 1078
1078 1079
1079 1080 def pull(orig, ui, repo, *pats, **opts):
1080 1081 result = orig(ui, repo, *pats, **opts)
1081 1082
1082 1083 if isenabled(repo):
1083 1084 # prefetch if it's configured
1084 1085 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
1085 1086 bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
1086 1087 bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')
1087 1088 ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart')
1088 1089
1089 1090 if prefetchrevset:
1090 1091 ui.status(_(b"prefetching file contents\n"))
1091 1092 revs = scmutil.revrange(repo, [prefetchrevset])
1092 1093 base = repo[b'.'].rev()
1093 1094 if bgprefetch:
1094 1095 repo.backgroundprefetch(
1095 1096 prefetchrevset, repack=bgrepack, ensurestart=ensurestart
1096 1097 )
1097 1098 else:
1098 1099 repo.prefetch(revs, base=base)
1099 1100 if bgrepack:
1100 1101 repackmod.backgroundrepack(
1101 1102 repo, incremental=True, ensurestart=ensurestart
1102 1103 )
1103 1104 elif bgrepack:
1104 1105 repackmod.backgroundrepack(
1105 1106 repo, incremental=True, ensurestart=ensurestart
1106 1107 )
1107 1108
1108 1109 return result
1109 1110
1110 1111
1111 1112 def exchangepull(orig, repo, remote, *args, **kwargs):
1112 1113 # Hook into the callstream/getbundle to insert bundle capabilities
1113 1114 # during a pull.
1114 1115 def localgetbundle(
1115 1116 orig, source, heads=None, common=None, bundlecaps=None, **kwargs
1116 1117 ):
1117 1118 if not bundlecaps:
1118 1119 bundlecaps = set()
1119 1120 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
1120 1121 return orig(
1121 1122 source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
1122 1123 )
1123 1124
1124 1125 if util.safehasattr(remote, b'_callstream'):
1125 1126 remote._localrepo = repo
1126 1127 elif util.safehasattr(remote, b'getbundle'):
1127 1128 extensions.wrapfunction(remote, b'getbundle', localgetbundle)
1128 1129
1129 1130 return orig(repo, remote, *args, **kwargs)
1130 1131
1131 1132
1132 1133 def _fileprefetchhook(repo, revs, match):
1133 1134 if isenabled(repo):
1134 1135 allfiles = []
1135 1136 for rev in revs:
1136 1137 if rev == nodemod.wdirrev or rev is None:
1137 1138 continue
1138 1139 ctx = repo[rev]
1139 1140 mf = ctx.manifest()
1140 1141 sparsematch = repo.maybesparsematch(ctx.rev())
1141 1142 for path in ctx.walk(match):
1142 1143 if (not sparsematch or sparsematch(path)) and path in mf:
1143 1144 allfiles.append((path, hex(mf[path])))
1144 1145 repo.fileservice.prefetch(allfiles)
1145 1146
1146 1147
1147 1148 @command(
1148 1149 b'debugremotefilelog',
1149 1150 [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
1150 1151 _(b'hg debugremotefilelog <path>'),
1151 1152 norepo=True,
1152 1153 )
1153 1154 def debugremotefilelog(ui, path, **opts):
1154 1155 return debugcommands.debugremotefilelog(ui, path, **opts)
1155 1156
1156 1157
1157 1158 @command(
1158 1159 b'verifyremotefilelog',
1159 1160 [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
1160 1161 _(b'hg verifyremotefilelogs <directory>'),
1161 1162 norepo=True,
1162 1163 )
1163 1164 def verifyremotefilelog(ui, path, **opts):
1164 1165 return debugcommands.verifyremotefilelog(ui, path, **opts)
1165 1166
1166 1167
1167 1168 @command(
1168 1169 b'debugdatapack',
1169 1170 [
1170 1171 (b'', b'long', None, _(b'print the long hashes')),
1171 1172 (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
1172 1173 ],
1173 1174 _(b'hg debugdatapack <paths>'),
1174 1175 norepo=True,
1175 1176 )
1176 1177 def debugdatapack(ui, *paths, **opts):
1177 1178 return debugcommands.debugdatapack(ui, *paths, **opts)
1178 1179
1179 1180
1180 1181 @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
1181 1182 def debughistorypack(ui, path, **opts):
1182 1183 return debugcommands.debughistorypack(ui, path)
1183 1184
1184 1185
1185 1186 @command(b'debugkeepset', [], _(b'hg debugkeepset'))
1186 1187 def debugkeepset(ui, repo, **opts):
1187 1188 # The command is used to measure keepset computation time
1188 1189 def keyfn(fname, fnode):
1189 1190 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1190 1191
1191 1192 repackmod.keepset(repo, keyfn)
1192 1193 return
1193 1194
1194 1195
1195 1196 @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
1196 1197 def debugwaitonrepack(ui, repo, **opts):
1197 1198 return debugcommands.debugwaitonrepack(repo)
1198 1199
1199 1200
1200 1201 @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
1201 1202 def debugwaitonprefetch(ui, repo, **opts):
1202 1203 return debugcommands.debugwaitonprefetch(repo)
1203 1204
1204 1205
1205 1206 def resolveprefetchopts(ui, opts):
1206 1207 if not opts.get(b'rev'):
1207 1208 revset = [b'.', b'draft()']
1208 1209
1209 1210 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
1210 1211 if prefetchrevset:
1211 1212 revset.append(b'(%s)' % prefetchrevset)
1212 1213 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
1213 1214 if bgprefetchrevs:
1214 1215 revset.append(b'(%s)' % bgprefetchrevs)
1215 1216 revset = b'+'.join(revset)
1216 1217
1217 1218 # update a revset with a date limit
1218 1219 revset = revdatelimit(ui, revset)
1219 1220
1220 1221 opts[b'rev'] = [revset]
1221 1222
1222 1223 if not opts.get(b'base'):
1223 1224 opts[b'base'] = None
1224 1225
1225 1226 return opts
1226 1227
1227 1228
1228 1229 @command(
1229 1230 b'prefetch',
1230 1231 [
1231 1232 (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
1232 1233 (b'', b'repack', False, _(b'run repack after prefetch')),
1233 1234 (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
1234 1235 ]
1235 1236 + commands.walkopts,
1236 1237 _(b'hg prefetch [OPTIONS] [FILE...]'),
1237 1238 helpcategory=command.CATEGORY_MAINTENANCE,
1238 1239 )
1239 1240 def prefetch(ui, repo, *pats, **opts):
1240 1241 """prefetch file revisions from the server
1241 1242
1242 1243 Prefetches file revisions for the specified revs and stores them in the
1243 1244 local remotefilelog cache. If no rev is specified, the default rev is
1244 1245 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1245 1246 File names or patterns can be used to limit which files are downloaded.
1246 1247
1247 1248 Return 0 on success.
1248 1249 """
1249 1250 opts = pycompat.byteskwargs(opts)
1250 1251 if not isenabled(repo):
1251 1252 raise error.Abort(_(b"repo is not shallow"))
1252 1253
1253 1254 opts = resolveprefetchopts(ui, opts)
1254 1255 revs = scmutil.revrange(repo, opts.get(b'rev'))
1255 1256 repo.prefetch(revs, opts.get(b'base'), pats, opts)
1256 1257
1257 1258 ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart')
1258 1259
1259 1260 # Run repack in background
1260 1261 if opts.get(b'repack'):
1261 1262 repackmod.backgroundrepack(
1262 1263 repo, incremental=True, ensurestart=ensurestart
1263 1264 )
1264 1265
1265 1266
1266 1267 @command(
1267 1268 b'repack',
1268 1269 [
1269 1270 (b'', b'background', None, _(b'run in a background process'), None),
1270 1271 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1271 1272 (
1272 1273 b'',
1273 1274 b'packsonly',
1274 1275 None,
1275 1276 _(b'only repack packs (skip loose objects)'),
1276 1277 None,
1277 1278 ),
1278 1279 ],
1279 1280 _(b'hg repack [OPTIONS]'),
1280 1281 )
1281 1282 def repack_(ui, repo, *pats, **opts):
1282 1283 if opts.get(r'background'):
1283 1284 ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart')
1284 1285 repackmod.backgroundrepack(
1285 1286 repo,
1286 1287 incremental=opts.get(r'incremental'),
1287 1288 packsonly=opts.get(r'packsonly', False),
1288 1289 ensurestart=ensurestart,
1289 1290 )
1290 1291 return
1291 1292
1292 1293 options = {b'packsonly': opts.get(r'packsonly')}
1293 1294
1294 1295 try:
1295 1296 if opts.get(r'incremental'):
1296 1297 repackmod.incrementalrepack(repo, options=options)
1297 1298 else:
1298 1299 repackmod.fullrepack(repo, options=options)
1299 1300 except repackmod.RepackAlreadyRunning as ex:
1300 1301 # Don't propagate the exception if the repack is already in
1301 1302 # progress, since we want the command to exit 0.
1302 1303 repo.ui.warn(b'%s\n' % ex)
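The only functional change in the hunk above is the new `configitem(b'devel', b'remotefilelog.bg-wait', default=False)` registration; the rest is unchanged context shown by the review tool. As a toy illustration of the "declare a config default, then read it back as a bool" pattern that registration relies on, here is a short sketch; `CONFIG_DEFAULTS`, `configitem`, and `configbool` are simplified stand-ins for Mercurial's registrar table and `ui.configbool`, not the real APIs.

# Register a default for a (section, name) pair, then look it up as a
# boolean, falling back to the registered default.
CONFIG_DEFAULTS = {}


def configitem(section, name, default=None):
    CONFIG_DEFAULTS[(section, name)] = default


def configbool(user_config, section, name):
    default = CONFIG_DEFAULTS.get((section, name), False)
    return bool(user_config.get((section, name), default))


configitem(b'devel', b'remotefilelog.bg-wait', default=False)

# A test harness could flip the flag through its configuration overrides:
assert configbool({(b'devel', b'remotefilelog.bg-wait'): True},
                  b'devel', b'remotefilelog.bg-wait') is True
assert configbool({}, b'devel', b'remotefilelog.bg-wait') is False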
@@ -1,912 +1,918 b''
1 1 from __future__ import absolute_import
2 2
3 3 import os
4 4 import time
5 5
6 6 from mercurial.i18n import _
7 7 from mercurial.node import (
8 8 nullid,
9 9 short,
10 10 )
11 11 from mercurial import (
12 12 encoding,
13 13 error,
14 14 lock as lockmod,
15 15 mdiff,
16 16 policy,
17 17 pycompat,
18 18 scmutil,
19 19 util,
20 20 vfs,
21 21 )
22 22 from mercurial.utils import procutil
23 23 from . import (
24 24 constants,
25 25 contentstore,
26 26 datapack,
27 27 historypack,
28 28 metadatastore,
29 29 shallowutil,
30 30 )
31 31
32 32 osutil = policy.importmod(r'osutil')
33 33
34 34
35 35 class RepackAlreadyRunning(error.Abort):
36 36 pass
37 37
38 38
39 39 def backgroundrepack(
40 40 repo, incremental=True, packsonly=False, ensurestart=False
41 41 ):
42 42 cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'repack']
43 43 msg = _(b"(running background repack)\n")
44 44 if incremental:
45 45 cmd.append(b'--incremental')
46 46 msg = _(b"(running background incremental repack)\n")
47 47 if packsonly:
48 48 cmd.append(b'--packsonly')
49 49 repo.ui.warn(msg)
50 50 # We know this command will find a binary, so don't block on it starting.
51 procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart)
51 kwargs = {}
52 if repo.ui.configbool(b'devel', b'remotefilelog.bg-wait'):
53 kwargs['record_wait'] = repo.ui.atexit
54
55 procutil.runbgcommand(
56 cmd, encoding.environ, ensurestart=ensurestart, **kwargs
57 )
52 58
53 59
54 60 def fullrepack(repo, options=None):
55 61 """If ``packsonly`` is True, stores that create only loose objects are skipped.
56 62 """
57 63 if util.safehasattr(repo, 'shareddatastores'):
58 64 datasource = contentstore.unioncontentstore(*repo.shareddatastores)
59 65 historysource = metadatastore.unionmetadatastore(
60 66 *repo.sharedhistorystores, allowincomplete=True
61 67 )
62 68
63 69 packpath = shallowutil.getcachepackpath(
64 70 repo, constants.FILEPACK_CATEGORY
65 71 )
66 72 _runrepack(
67 73 repo,
68 74 datasource,
69 75 historysource,
70 76 packpath,
71 77 constants.FILEPACK_CATEGORY,
72 78 options=options,
73 79 )
74 80
75 81 if util.safehasattr(repo.manifestlog, 'datastore'):
76 82 localdata, shareddata = _getmanifeststores(repo)
77 83 lpackpath, ldstores, lhstores = localdata
78 84 spackpath, sdstores, shstores = shareddata
79 85
80 86 # Repack the shared manifest store
81 87 datasource = contentstore.unioncontentstore(*sdstores)
82 88 historysource = metadatastore.unionmetadatastore(
83 89 *shstores, allowincomplete=True
84 90 )
85 91 _runrepack(
86 92 repo,
87 93 datasource,
88 94 historysource,
89 95 spackpath,
90 96 constants.TREEPACK_CATEGORY,
91 97 options=options,
92 98 )
93 99
94 100 # Repack the local manifest store
95 101 datasource = contentstore.unioncontentstore(
96 102 *ldstores, allowincomplete=True
97 103 )
98 104 historysource = metadatastore.unionmetadatastore(
99 105 *lhstores, allowincomplete=True
100 106 )
101 107 _runrepack(
102 108 repo,
103 109 datasource,
104 110 historysource,
105 111 lpackpath,
106 112 constants.TREEPACK_CATEGORY,
107 113 options=options,
108 114 )
109 115
110 116
111 117 def incrementalrepack(repo, options=None):
112 118 """This repacks the repo by looking at the distribution of pack files in the
113 119 repo and performing the most minimal repack to keep the repo in good shape.
114 120 """
115 121 if util.safehasattr(repo, 'shareddatastores'):
116 122 packpath = shallowutil.getcachepackpath(
117 123 repo, constants.FILEPACK_CATEGORY
118 124 )
119 125 _incrementalrepack(
120 126 repo,
121 127 repo.shareddatastores,
122 128 repo.sharedhistorystores,
123 129 packpath,
124 130 constants.FILEPACK_CATEGORY,
125 131 options=options,
126 132 )
127 133
128 134 if util.safehasattr(repo.manifestlog, 'datastore'):
129 135 localdata, shareddata = _getmanifeststores(repo)
130 136 lpackpath, ldstores, lhstores = localdata
131 137 spackpath, sdstores, shstores = shareddata
132 138
133 139 # Repack the shared manifest store
134 140 _incrementalrepack(
135 141 repo,
136 142 sdstores,
137 143 shstores,
138 144 spackpath,
139 145 constants.TREEPACK_CATEGORY,
140 146 options=options,
141 147 )
142 148
143 149 # Repack the local manifest store
144 150 _incrementalrepack(
145 151 repo,
146 152 ldstores,
147 153 lhstores,
148 154 lpackpath,
149 155 constants.TREEPACK_CATEGORY,
150 156 allowincompletedata=True,
151 157 options=options,
152 158 )
153 159
154 160
155 161 def _getmanifeststores(repo):
156 162 shareddatastores = repo.manifestlog.shareddatastores
157 163 localdatastores = repo.manifestlog.localdatastores
158 164 sharedhistorystores = repo.manifestlog.sharedhistorystores
159 165 localhistorystores = repo.manifestlog.localhistorystores
160 166
161 167 sharedpackpath = shallowutil.getcachepackpath(
162 168 repo, constants.TREEPACK_CATEGORY
163 169 )
164 170 localpackpath = shallowutil.getlocalpackpath(
165 171 repo.svfs.vfs.base, constants.TREEPACK_CATEGORY
166 172 )
167 173
168 174 return (
169 175 (localpackpath, localdatastores, localhistorystores),
170 176 (sharedpackpath, shareddatastores, sharedhistorystores),
171 177 )
172 178
173 179
174 180 def _topacks(packpath, files, constructor):
175 181 paths = list(os.path.join(packpath, p) for p in files)
176 182 packs = list(constructor(p) for p in paths)
177 183 return packs
178 184
179 185
180 186 def _deletebigpacks(repo, folder, files):
181 187 """Deletes packfiles that are bigger than ``packs.maxpacksize``.
182 188
183 189 Returns ``files`` with the removed files omitted."""
184 190 maxsize = repo.ui.configbytes(b"packs", b"maxpacksize")
185 191 if maxsize <= 0:
186 192 return files
187 193
188 194 # This only considers datapacks today, but we could broaden it to include
189 195 # historypacks.
190 196 VALIDEXTS = [b".datapack", b".dataidx"]
191 197
192 198 # Either an oversize index or datapack will trigger cleanup of the whole
193 199 # pack:
194 200 oversized = {
195 201 os.path.splitext(path)[0]
196 202 for path, ftype, stat in files
197 203 if (stat.st_size > maxsize and (os.path.splitext(path)[1] in VALIDEXTS))
198 204 }
199 205
200 206 for rootfname in oversized:
201 207 rootpath = os.path.join(folder, rootfname)
202 208 for ext in VALIDEXTS:
203 209 path = rootpath + ext
204 210 repo.ui.debug(
205 211 b'removing oversize packfile %s (%s)\n'
206 212 % (path, util.bytecount(os.stat(path).st_size))
207 213 )
208 214 os.unlink(path)
209 215 return [row for row in files if os.path.basename(row[0]) not in oversized]
210 216
211 217
212 218 def _incrementalrepack(
213 219 repo,
214 220 datastore,
215 221 historystore,
216 222 packpath,
217 223 category,
218 224 allowincompletedata=False,
219 225 options=None,
220 226 ):
221 227 shallowutil.mkstickygroupdir(repo.ui, packpath)
222 228
223 229 files = osutil.listdir(packpath, stat=True)
224 230 files = _deletebigpacks(repo, packpath, files)
225 231 datapacks = _topacks(
226 232 packpath, _computeincrementaldatapack(repo.ui, files), datapack.datapack
227 233 )
228 234 datapacks.extend(
229 235 s for s in datastore if not isinstance(s, datapack.datapackstore)
230 236 )
231 237
232 238 historypacks = _topacks(
233 239 packpath,
234 240 _computeincrementalhistorypack(repo.ui, files),
235 241 historypack.historypack,
236 242 )
237 243 historypacks.extend(
238 244 s
239 245 for s in historystore
240 246 if not isinstance(s, historypack.historypackstore)
241 247 )
242 248
243 249 # ``allhistory{files,packs}`` contains all known history packs, even ones we
244 250 # don't plan to repack. They are used during the datapack repack to ensure
245 251 # good ordering of nodes.
246 252 allhistoryfiles = _allpackfileswithsuffix(
247 253 files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX
248 254 )
249 255 allhistorypacks = _topacks(
250 256 packpath,
251 257 (f for f, mode, stat in allhistoryfiles),
252 258 historypack.historypack,
253 259 )
254 260 allhistorypacks.extend(
255 261 s
256 262 for s in historystore
257 263 if not isinstance(s, historypack.historypackstore)
258 264 )
259 265 _runrepack(
260 266 repo,
261 267 contentstore.unioncontentstore(
262 268 *datapacks, allowincomplete=allowincompletedata
263 269 ),
264 270 metadatastore.unionmetadatastore(*historypacks, allowincomplete=True),
265 271 packpath,
266 272 category,
267 273 fullhistory=metadatastore.unionmetadatastore(
268 274 *allhistorypacks, allowincomplete=True
269 275 ),
270 276 options=options,
271 277 )
272 278
273 279
274 280 def _computeincrementaldatapack(ui, files):
275 281 opts = {
276 282 b'gencountlimit': ui.configint(b'remotefilelog', b'data.gencountlimit'),
277 283 b'generations': ui.configlist(b'remotefilelog', b'data.generations'),
278 284 b'maxrepackpacks': ui.configint(
279 285 b'remotefilelog', b'data.maxrepackpacks'
280 286 ),
281 287 b'repackmaxpacksize': ui.configbytes(
282 288 b'remotefilelog', b'data.repackmaxpacksize'
283 289 ),
284 290 b'repacksizelimit': ui.configbytes(
285 291 b'remotefilelog', b'data.repacksizelimit'
286 292 ),
287 293 }
288 294
289 295 packfiles = _allpackfileswithsuffix(
290 296 files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX
291 297 )
292 298 return _computeincrementalpack(packfiles, opts)
293 299
294 300
295 301 def _computeincrementalhistorypack(ui, files):
296 302 opts = {
297 303 b'gencountlimit': ui.configint(
298 304 b'remotefilelog', b'history.gencountlimit'
299 305 ),
300 306 b'generations': ui.configlist(
301 307 b'remotefilelog', b'history.generations', [b'100MB']
302 308 ),
303 309 b'maxrepackpacks': ui.configint(
304 310 b'remotefilelog', b'history.maxrepackpacks'
305 311 ),
306 312 b'repackmaxpacksize': ui.configbytes(
307 313 b'remotefilelog', b'history.repackmaxpacksize', b'400MB'
308 314 ),
309 315 b'repacksizelimit': ui.configbytes(
310 316 b'remotefilelog', b'history.repacksizelimit'
311 317 ),
312 318 }
313 319
314 320 packfiles = _allpackfileswithsuffix(
315 321 files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX
316 322 )
317 323 return _computeincrementalpack(packfiles, opts)
318 324
319 325
320 326 def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
321 327 result = []
322 328 fileset = set(fn for fn, mode, stat in files)
323 329 for filename, mode, stat in files:
324 330 if not filename.endswith(packsuffix):
325 331 continue
326 332
327 333 prefix = filename[: -len(packsuffix)]
328 334
329 335 # Don't process a pack if it doesn't have an index.
330 336 if (prefix + indexsuffix) not in fileset:
331 337 continue
332 338 result.append((prefix, mode, stat))
333 339
334 340 return result
335 341
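As an editorial aside, a minimal sketch of the pack/index pairing this helper performs; the file names and the .datapack/.dataidx suffixes below are purely illustrative, not taken from the diff:

files = [
    (b'abc123.datapack', 0, None),
    (b'abc123.dataidx', 0, None),
    (b'def456.datapack', 0, None),  # its index file is missing
]
packsuffix, indexsuffix = b'.datapack', b'.dataidx'
fileset = set(fn for fn, mode, stat in files)
prefixes = [
    fn[: -len(packsuffix)]
    for fn, mode, stat in files
    if fn.endswith(packsuffix) and (fn[: -len(packsuffix)] + indexsuffix) in fileset
]
# prefixes == [b'abc123']; def456 is skipped because a pack without its
# index cannot be read and so must not be repacked.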
336 342
337 343 def _computeincrementalpack(files, opts):
338 344 """Given a set of pack files along with the configuration options, this
339 345 function computes the list of files that should be packed as part of an
340 346 incremental repack.
341 347
342 348 It tries to strike a balance between keeping incremental repacks cheap (i.e.
343 349 packing small things when possible) and rolling the packs up to the big ones
344 350 over time.
345 351 """
346 352
347 353 limits = list(
348 354 sorted((util.sizetoint(s) for s in opts[b'generations']), reverse=True)
349 355 )
350 356 limits.append(0)
351 357
352 358 # Group the packs by generation (i.e. by size)
353 359 generations = []
354 360 for i in pycompat.xrange(len(limits)):
355 361 generations.append([])
356 362
357 363 sizes = {}
358 364 for prefix, mode, stat in files:
359 365 size = stat.st_size
360 366 if size > opts[b'repackmaxpacksize']:
361 367 continue
362 368
363 369 sizes[prefix] = size
364 370 for i, limit in enumerate(limits):
365 371 if size > limit:
366 372 generations[i].append(prefix)
367 373 break
368 374
369 375 # Steps for picking what packs to repack:
370 376 # 1. Pick the largest generation with > gencountlimit pack files.
371 377 # 2. Take the smallest three packs.
372 378 # 3. While total-size-of-packs < repacksizelimit: add another pack
373 379
374 380 # Find the largest generation with more than gencountlimit packs
375 381 genpacks = []
376 382 for i, limit in enumerate(limits):
377 383 if len(generations[i]) > opts[b'gencountlimit']:
378 384 # Sort to be smallest last, for easy popping later
379 385 genpacks.extend(
380 386 sorted(generations[i], reverse=True, key=lambda x: sizes[x])
381 387 )
382 388 break
383 389
384 390 # Take as many packs from the generation as we can
385 391 chosenpacks = genpacks[-3:]
386 392 genpacks = genpacks[:-3]
387 393 repacksize = sum(sizes[n] for n in chosenpacks)
388 394 while (
389 395 repacksize < opts[b'repacksizelimit']
390 396 and genpacks
391 397 and len(chosenpacks) < opts[b'maxrepackpacks']
392 398 ):
393 399 chosenpacks.append(genpacks.pop())
394 400 repacksize += sizes[chosenpacks[-1]]
395 401
396 402 return chosenpacks
397 403
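As an editorial illustration of the generation bucketing above, here is a minimal, self-contained sketch; the pack names, sizes and generation limits are hypothetical and mirror the loop in _computeincrementalpack rather than extend it:

# Limits as produced from a hypothetical generations list of ['100MB', '1MB'],
# sorted descending with a trailing 0 (as in the code above).
limits = [100 * 1024 * 1024, 1024 * 1024, 0]

# Hypothetical pack prefixes and their on-disk sizes in bytes.
sizes = {
    b'pack-small': 512 * 1024,
    b'pack-medium': 5 * 1024 * 1024,
    b'pack-large': 200 * 1024 * 1024,
}

# A pack lands in the first generation whose lower bound its size exceeds.
generations = [[] for _ in limits]
for prefix, size in sorted(sizes.items()):
    for i, limit in enumerate(limits):
        if size > limit:
            generations[i].append(prefix)
            break

# generations == [[b'pack-large'], [b'pack-medium'], [b'pack-small']]
# The repack then picks the largest generation holding more than
# gencountlimit packs and rolls its smallest members together.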
398 404
399 405 def _runrepack(
400 406 repo, data, history, packpath, category, fullhistory=None, options=None
401 407 ):
402 408 shallowutil.mkstickygroupdir(repo.ui, packpath)
403 409
404 410 def isold(repo, filename, node):
405 411 """Check if the file node is older than a limit.
406 412 Unless a limit is specified in the config, the default limit is used.
407 413 """
408 414 filectx = repo.filectx(filename, fileid=node)
409 415 filetime = repo[filectx.linkrev()].date()
410 416
411 417 ttl = repo.ui.configint(b'remotefilelog', b'nodettl')
412 418
413 419 limit = time.time() - ttl
414 420 return filetime[0] < limit
415 421
416 422 garbagecollect = repo.ui.configbool(b'remotefilelog', b'gcrepack')
417 423 if not fullhistory:
418 424 fullhistory = history
419 425 packer = repacker(
420 426 repo,
421 427 data,
422 428 history,
423 429 fullhistory,
424 430 category,
425 431 gc=garbagecollect,
426 432 isold=isold,
427 433 options=options,
428 434 )
429 435
430 436 with datapack.mutabledatapack(repo.ui, packpath) as dpack:
431 437 with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
432 438 try:
433 439 packer.run(dpack, hpack)
434 440 except error.LockHeld:
435 441 raise RepackAlreadyRunning(
436 442 _(
437 443 b"skipping repack - another repack "
438 444 b"is already running"
439 445 )
440 446 )
441 447
442 448
443 449 def keepset(repo, keyfn, lastkeepkeys=None):
444 450 """Computes a keepset which is not garbage collected.
445 451 'keyfn' is a function that maps filename, node to a unique key.
446 452 'lastkeepkeys' is an optional argument; if provided, the keepset
447 453 function adds more keys to it and returns the result.
448 454 """
449 455 if not lastkeepkeys:
450 456 keepkeys = set()
451 457 else:
452 458 keepkeys = lastkeepkeys
453 459
454 460 # We want to keep:
455 461 # 1. Working copy parent
456 462 # 2. Draft commits
457 463 # 3. Parents of draft commits
458 464 # 4. Pullprefetch and bgprefetchrevs revsets if specified
459 465 revs = [b'.', b'draft()', b'parents(draft())']
460 466 prefetchrevs = repo.ui.config(b'remotefilelog', b'pullprefetch', None)
461 467 if prefetchrevs:
462 468 revs.append(b'(%s)' % prefetchrevs)
463 469 prefetchrevs = repo.ui.config(b'remotefilelog', b'bgprefetchrevs', None)
464 470 if prefetchrevs:
465 471 revs.append(b'(%s)' % prefetchrevs)
466 472 revs = b'+'.join(revs)
467 473
468 474 revs = [b'sort((%s), "topo")' % revs]
469 475 keep = scmutil.revrange(repo, revs)
470 476
471 477 processed = set()
472 478 lastmanifest = None
473 479
474 480 # process the commits in toposorted order starting from the oldest
475 481 for r in reversed(keep._list):
476 482 if repo[r].p1().rev() in processed:
477 483 # if the direct parent has already been processed
478 484 # then we only need to process the delta
479 485 m = repo[r].manifestctx().readdelta()
480 486 else:
481 487 # otherwise take the manifest and diff it
482 488 # with the previous manifest if one exists
483 489 if lastmanifest:
484 490 m = repo[r].manifest().diff(lastmanifest)
485 491 else:
486 492 m = repo[r].manifest()
487 493 lastmanifest = repo[r].manifest()
488 494 processed.add(r)
489 495
490 496 # populate keepkeys with keys from the current manifest
491 497 if type(m) is dict:
492 498 # m is a result of diff of two manifests and is a dictionary that
493 499 # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple
494 500 for filename, diff in pycompat.iteritems(m):
495 501 if diff[0][0] is not None:
496 502 keepkeys.add(keyfn(filename, diff[0][0]))
497 503 else:
498 504 # m is a manifest object
499 505 for filename, filenode in pycompat.iteritems(m):
500 506 keepkeys.add(keyfn(filename, filenode))
501 507
502 508 return keepkeys
503 509
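For reference, a sketch of the revset string the code above assembles; the pullprefetch and bgprefetchrevs values are hypothetical:

# Assuming remotefilelog.pullprefetch = 'master' and
# remotefilelog.bgprefetchrevs = 'bookmark()' (both made-up values), the
# expression handed to scmutil.revrange() is:
revs = [b'sort((.+draft()+parents(draft())+(master)+(bookmark())), "topo")']
# i.e. the working copy parent, all drafts and their parents, plus both
# prefetch revsets, toposorted so manifests can be diffed incrementally.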
504 510
505 511 class repacker(object):
506 512 """Class for orchestrating the repack of data and history information into a
507 513 new format.
508 514 """
509 515
510 516 def __init__(
511 517 self,
512 518 repo,
513 519 data,
514 520 history,
515 521 fullhistory,
516 522 category,
517 523 gc=False,
518 524 isold=None,
519 525 options=None,
520 526 ):
521 527 self.repo = repo
522 528 self.data = data
523 529 self.history = history
524 530 self.fullhistory = fullhistory
525 531 self.unit = constants.getunits(category)
526 532 self.garbagecollect = gc
527 533 self.options = options
528 534 if self.garbagecollect:
529 535 if not isold:
530 536 raise ValueError(b"Function 'isold' is not properly specified")
531 537 # use (filename, node) tuple as a keepset key
532 538 self.keepkeys = keepset(repo, lambda f, n: (f, n))
533 539 self.isold = isold
534 540
535 541 def run(self, targetdata, targethistory):
536 542 ledger = repackledger()
537 543
538 544 with lockmod.lock(
539 545 repacklockvfs(self.repo), b"repacklock", desc=None, timeout=0
540 546 ):
541 547 self.repo.hook(b'prerepack')
542 548
543 549 # Populate ledger from source
544 550 self.data.markledger(ledger, options=self.options)
545 551 self.history.markledger(ledger, options=self.options)
546 552
547 553 # Run repack
548 554 self.repackdata(ledger, targetdata)
549 555 self.repackhistory(ledger, targethistory)
550 556
551 557 # Call cleanup on each source
552 558 for source in ledger.sources:
553 559 source.cleanup(ledger)
554 560
555 561 def _chainorphans(self, ui, filename, nodes, orphans, deltabases):
556 562 """Reorderes ``orphans`` into a single chain inside ``nodes`` and
557 563 ``deltabases``.
558 564
559 565 We often have orphan entries (nodes without a base that aren't
560 566 referenced by other nodes -- i.e., part of a chain) due to gaps in
561 567 history. Rather than store them as individual fulltexts, we prefer to
562 568 insert them as one chain sorted by size.
563 569 """
564 570 if not orphans:
565 571 return nodes
566 572
567 573 def getsize(node, default=0):
568 574 meta = self.data.getmeta(filename, node)
569 575 if constants.METAKEYSIZE in meta:
570 576 return meta[constants.METAKEYSIZE]
571 577 else:
572 578 return default
573 579
574 580 # Sort orphans by size; biggest first is preferred, since it's more
575 581 # likely to be the newest version assuming files grow over time.
576 582 # (Sort by node first to ensure the sort is stable.)
577 583 orphans = sorted(orphans)
578 584 orphans = list(sorted(orphans, key=getsize, reverse=True))
579 585 if ui.debugflag:
580 586 ui.debug(
581 587 b"%s: orphan chain: %s\n"
582 588 % (filename, b", ".join([short(s) for s in orphans]))
583 589 )
584 590
585 591 # Create one contiguous chain and reassign deltabases.
586 592 for i, node in enumerate(orphans):
587 593 if i == 0:
588 594 deltabases[node] = (nullid, 0)
589 595 else:
590 596 parent = orphans[i - 1]
591 597 deltabases[node] = (parent, deltabases[parent][1] + 1)
592 598 nodes = [n for n in nodes if n not in orphans]
593 599 nodes += orphans
594 600 return nodes
595 601
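To make the reassignment concrete, a small self-contained sketch of the chain built for three hypothetical orphan nodes (a zero hash stands in for mercurial.node.nullid so the snippet runs on its own):

nullid = b'\0' * 20  # stand-in for mercurial.node.nullid

# Three hypothetical orphans, already sorted biggest-first by getsize().
n_big, n_mid, n_small = b'\x01' * 20, b'\x02' * 20, b'\x03' * 20
orphans = [n_big, n_mid, n_small]

deltabases = {}
for i, node in enumerate(orphans):
    if i == 0:
        deltabases[node] = (nullid, 0)  # chain head, delta'd against null
    else:
        parent = orphans[i - 1]
        deltabases[node] = (parent, deltabases[parent][1] + 1)

# deltabases now reads:
#   n_big   -> (nullid, 0)
#   n_mid   -> (n_big, 1)
#   n_small -> (n_mid, 2)
# so each orphan is stored as a delta against the next-larger orphan.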
596 602 def repackdata(self, ledger, target):
597 603 ui = self.repo.ui
598 604 maxchainlen = ui.configint(b'packs', b'maxchainlen', 1000)
599 605
600 606 byfile = {}
601 607 for entry in pycompat.itervalues(ledger.entries):
602 608 if entry.datasource:
603 609 byfile.setdefault(entry.filename, {})[entry.node] = entry
604 610
605 611 count = 0
606 612 repackprogress = ui.makeprogress(
607 613 _(b"repacking data"), unit=self.unit, total=len(byfile)
608 614 )
609 615 for filename, entries in sorted(pycompat.iteritems(byfile)):
610 616 repackprogress.update(count)
611 617
612 618 ancestors = {}
613 619 nodes = list(node for node in entries)
614 620 nohistory = []
615 621 buildprogress = ui.makeprogress(
616 622 _(b"building history"), unit=b'nodes', total=len(nodes)
617 623 )
618 624 for i, node in enumerate(nodes):
619 625 if node in ancestors:
620 626 continue
621 627 buildprogress.update(i)
622 628 try:
623 629 ancestors.update(
624 630 self.fullhistory.getancestors(
625 631 filename, node, known=ancestors
626 632 )
627 633 )
628 634 except KeyError:
629 635 # Since we're packing data entries, we may not have the
630 636 # corresponding history entries for them. It's not a big
631 637 # deal, but the entries won't be delta'd perfectly.
632 638 nohistory.append(node)
633 639 buildprogress.complete()
634 640
635 641 # Order the nodes children-first, so we can produce reverse deltas
636 642 orderednodes = list(reversed(self._toposort(ancestors)))
637 643 if len(nohistory) > 0:
638 644 ui.debug(
639 645 b'repackdata: %d nodes without history\n' % len(nohistory)
640 646 )
641 647 orderednodes.extend(sorted(nohistory))
642 648
643 649 # Filter orderednodes to just the nodes we want to serialize (it
644 650 # currently also has the edge nodes' ancestors).
645 651 orderednodes = list(
646 652 filter(lambda node: node in nodes, orderednodes)
647 653 )
648 654
649 655 # Garbage collect old nodes:
650 656 if self.garbagecollect:
651 657 neworderednodes = []
652 658 for node in orderednodes:
653 659 # If the node is old and is not in the keepset, we skip it,
654 660 # and mark as garbage collected
655 661 if (filename, node) not in self.keepkeys and self.isold(
656 662 self.repo, filename, node
657 663 ):
658 664 entries[node].gced = True
659 665 continue
660 666 neworderednodes.append(node)
661 667 orderednodes = neworderednodes
662 668
663 669 # Compute delta bases for nodes:
664 670 deltabases = {}
665 671 nobase = set()
666 672 referenced = set()
667 673 nodes = set(nodes)
668 674 processprogress = ui.makeprogress(
669 675 _(b"processing nodes"), unit=b'nodes', total=len(orderednodes)
670 676 )
671 677 for i, node in enumerate(orderednodes):
672 678 processprogress.update(i)
673 679 # Find delta base
674 680 # TODO: allow delta'ing against most recent descendant instead
675 681 # of immediate child
676 682 deltatuple = deltabases.get(node, None)
677 683 if deltatuple is None:
678 684 deltabase, chainlen = nullid, 0
679 685 deltabases[node] = (nullid, 0)
680 686 nobase.add(node)
681 687 else:
682 688 deltabase, chainlen = deltatuple
683 689 referenced.add(deltabase)
684 690
685 691 # Use available ancestor information to inform our delta choices
686 692 ancestorinfo = ancestors.get(node)
687 693 if ancestorinfo:
688 694 p1, p2, linknode, copyfrom = ancestorinfo
689 695
690 696 # The presence of copyfrom means we're at a point where the
691 697 # file was copied from elsewhere. So don't attempt to do any
692 698 # deltas with the other file.
693 699 if copyfrom:
694 700 p1 = nullid
695 701
696 702 if chainlen < maxchainlen:
697 703 # Record this child as the delta base for its parents.
698 704 # This may be non-optimal, since the parents may have
699 705 # many children, and this will only choose the last one.
700 706 # TODO: record all children and try all deltas to find
701 707 # best
702 708 if p1 != nullid:
703 709 deltabases[p1] = (node, chainlen + 1)
704 710 if p2 != nullid:
705 711 deltabases[p2] = (node, chainlen + 1)
706 712
707 713 # experimental config: repack.chainorphansbysize
708 714 if ui.configbool(b'repack', b'chainorphansbysize'):
709 715 orphans = nobase - referenced
710 716 orderednodes = self._chainorphans(
711 717 ui, filename, orderednodes, orphans, deltabases
712 718 )
713 719
714 720 # Compute deltas and write to the pack
715 721 for i, node in enumerate(orderednodes):
716 722 deltabase, chainlen = deltabases[node]
717 723 # Compute delta
718 724 # TODO: Optimize the deltachain fetching. Since we're
719 725 # iterating over the different version of the file, we may
720 726 # be fetching the same deltachain over and over again.
721 727 if deltabase != nullid:
722 728 deltaentry = self.data.getdelta(filename, node)
723 729 delta, deltabasename, origdeltabase, meta = deltaentry
724 730 size = meta.get(constants.METAKEYSIZE)
725 731 if (
726 732 deltabasename != filename
727 733 or origdeltabase != deltabase
728 734 or size is None
729 735 ):
730 736 deltabasetext = self.data.get(filename, deltabase)
731 737 original = self.data.get(filename, node)
732 738 size = len(original)
733 739 delta = mdiff.textdiff(deltabasetext, original)
734 740 else:
735 741 delta = self.data.get(filename, node)
736 742 size = len(delta)
737 743 meta = self.data.getmeta(filename, node)
738 744
739 745 # TODO: don't use the delta if it's larger than the fulltext
740 746 if constants.METAKEYSIZE not in meta:
741 747 meta[constants.METAKEYSIZE] = size
742 748 target.add(filename, node, deltabase, delta, meta)
743 749
744 750 entries[node].datarepacked = True
745 751
746 752 processprogress.complete()
747 753 count += 1
748 754
749 755 repackprogress.complete()
750 756 target.close(ledger=ledger)
751 757
752 758 def repackhistory(self, ledger, target):
753 759 ui = self.repo.ui
754 760
755 761 byfile = {}
756 762 for entry in pycompat.itervalues(ledger.entries):
757 763 if entry.historysource:
758 764 byfile.setdefault(entry.filename, {})[entry.node] = entry
759 765
760 766 progress = ui.makeprogress(
761 767 _(b"repacking history"), unit=self.unit, total=len(byfile)
762 768 )
763 769 for filename, entries in sorted(pycompat.iteritems(byfile)):
764 770 ancestors = {}
765 771 nodes = list(node for node in entries)
766 772
767 773 for node in nodes:
768 774 if node in ancestors:
769 775 continue
770 776 ancestors.update(
771 777 self.history.getancestors(filename, node, known=ancestors)
772 778 )
773 779
774 780 # Order the nodes children-first
775 781 orderednodes = reversed(self._toposort(ancestors))
776 782
777 783 # Write to the pack
778 784 dontprocess = set()
779 785 for node in orderednodes:
780 786 p1, p2, linknode, copyfrom = ancestors[node]
781 787
782 788 # If the node is marked dontprocess, but it's also in the
783 789 # explicit entries set, that means the node exists both in this
784 790 # file and in another file that was copied to this file.
785 791 # Usually this happens if the file was copied to another file,
786 792 # then the copy was deleted, then reintroduced without copy
787 793 # metadata. The original add and the new add have the same hash
788 794 # since the content is identical and the parents are null.
789 795 if node in dontprocess and node not in entries:
790 796 # If copyfrom == filename, it means the copy history
791 797 # went to some other file, then came back to this one, so we
792 798 # should continue processing it.
793 799 if p1 != nullid and copyfrom != filename:
794 800 dontprocess.add(p1)
795 801 if p2 != nullid:
796 802 dontprocess.add(p2)
797 803 continue
798 804
799 805 if copyfrom:
800 806 dontprocess.add(p1)
801 807
802 808 target.add(filename, node, p1, p2, linknode, copyfrom)
803 809
804 810 if node in entries:
805 811 entries[node].historyrepacked = True
806 812
807 813 progress.increment()
808 814
809 815 progress.complete()
810 816 target.close(ledger=ledger)
811 817
812 818 def _toposort(self, ancestors):
813 819 def parentfunc(node):
814 820 p1, p2, linknode, copyfrom = ancestors[node]
815 821 parents = []
816 822 if p1 != nullid:
817 823 parents.append(p1)
818 824 if p2 != nullid:
819 825 parents.append(p2)
820 826 return parents
821 827
822 828 sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc)
823 829 return sortednodes
824 830
825 831
826 832 class repackledger(object):
827 833 """Storage for all the bookkeeping that happens during a repack. It contains
828 834 the list of revisions being repacked, what happened to each revision, and
829 835 which source store contained which revision originally (for later cleanup).
830 836 """
831 837
832 838 def __init__(self):
833 839 self.entries = {}
834 840 self.sources = {}
835 841 self.created = set()
836 842
837 843 def markdataentry(self, source, filename, node):
838 844 """Mark the given filename+node revision as having a data rev in the
839 845 given source.
840 846 """
841 847 entry = self._getorcreateentry(filename, node)
842 848 entry.datasource = True
843 849 entries = self.sources.get(source)
844 850 if not entries:
845 851 entries = set()
846 852 self.sources[source] = entries
847 853 entries.add(entry)
848 854
849 855 def markhistoryentry(self, source, filename, node):
850 856 """Mark the given filename+node revision as having a history rev in the
851 857 given source.
852 858 """
853 859 entry = self._getorcreateentry(filename, node)
854 860 entry.historysource = True
855 861 entries = self.sources.get(source)
856 862 if not entries:
857 863 entries = set()
858 864 self.sources[source] = entries
859 865 entries.add(entry)
860 866
861 867 def _getorcreateentry(self, filename, node):
862 868 key = (filename, node)
863 869 value = self.entries.get(key)
864 870 if not value:
865 871 value = repackentry(filename, node)
866 872 self.entries[key] = value
867 873
868 874 return value
869 875
870 876 def addcreated(self, value):
871 877 self.created.add(value)
872 878
873 879
874 880 class repackentry(object):
875 881 """Simple class representing a single revision entry in the repackledger.
876 882 """
877 883
878 884 __slots__ = (
879 885 r'filename',
880 886 r'node',
881 887 r'datasource',
882 888 r'historysource',
883 889 r'datarepacked',
884 890 r'historyrepacked',
885 891 r'gced',
886 892 )
887 893
888 894 def __init__(self, filename, node):
889 895 self.filename = filename
890 896 self.node = node
891 897 # If the revision has a data entry in the source
892 898 self.datasource = False
893 899 # If the revision has a history entry in the source
894 900 self.historysource = False
895 901 # If the revision's data entry was repacked into the repack target
896 902 self.datarepacked = False
897 903 # If the revision's history entry was repacked into the repack target
898 904 self.historyrepacked = False
899 905 # If garbage collected
900 906 self.gced = False
901 907
902 908
903 909 def repacklockvfs(repo):
904 910 if util.safehasattr(repo, 'name'):
905 911 # Lock in the shared cache so repacks across multiple copies of the same
906 912 # repo are coordinated.
907 913 sharedcachepath = shallowutil.getcachepackpath(
908 914 repo, constants.FILEPACK_CATEGORY
909 915 )
910 916 return vfs.vfs(sharedcachepath)
911 917 else:
912 918 return repo.svfs
@@ -1,354 +1,358 b''
1 1 # shallowrepo.py - shallow repository that uses remote filelogs
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import os
10 10
11 11 from mercurial.i18n import _
12 12 from mercurial.node import hex, nullid, nullrev
13 13 from mercurial import (
14 14 encoding,
15 15 error,
16 16 localrepo,
17 17 match,
18 18 pycompat,
19 19 scmutil,
20 20 sparse,
21 21 util,
22 22 )
23 23 from mercurial.utils import procutil
24 24 from . import (
25 25 connectionpool,
26 26 constants,
27 27 contentstore,
28 28 datapack,
29 29 fileserverclient,
30 30 historypack,
31 31 metadatastore,
32 32 remotefilectx,
33 33 remotefilelog,
34 34 shallowutil,
35 35 )
36 36
37 37 # These make*stores functions are global so that other extensions can replace
38 38 # them.
39 39 def makelocalstores(repo):
40 40 """In-repo stores, like .hg/store/data; can not be discarded."""
41 41 localpath = os.path.join(repo.svfs.vfs.base, b'data')
42 42 if not os.path.exists(localpath):
43 43 os.makedirs(localpath)
44 44
45 45 # Instantiate local data stores
46 46 localcontent = contentstore.remotefilelogcontentstore(
47 47 repo, localpath, repo.name, shared=False
48 48 )
49 49 localmetadata = metadatastore.remotefilelogmetadatastore(
50 50 repo, localpath, repo.name, shared=False
51 51 )
52 52 return localcontent, localmetadata
53 53
54 54
55 55 def makecachestores(repo):
56 56 """Typically machine-wide, cache of remote data; can be discarded."""
57 57 # Instantiate shared cache stores
58 58 cachepath = shallowutil.getcachepath(repo.ui)
59 59 cachecontent = contentstore.remotefilelogcontentstore(
60 60 repo, cachepath, repo.name, shared=True
61 61 )
62 62 cachemetadata = metadatastore.remotefilelogmetadatastore(
63 63 repo, cachepath, repo.name, shared=True
64 64 )
65 65
66 66 repo.sharedstore = cachecontent
67 67 repo.shareddatastores.append(cachecontent)
68 68 repo.sharedhistorystores.append(cachemetadata)
69 69
70 70 return cachecontent, cachemetadata
71 71
72 72
73 73 def makeremotestores(repo, cachecontent, cachemetadata):
74 74 """These stores fetch data from a remote server."""
75 75 # Instantiate remote stores
76 76 repo.fileservice = fileserverclient.fileserverclient(repo)
77 77 remotecontent = contentstore.remotecontentstore(
78 78 repo.ui, repo.fileservice, cachecontent
79 79 )
80 80 remotemetadata = metadatastore.remotemetadatastore(
81 81 repo.ui, repo.fileservice, cachemetadata
82 82 )
83 83 return remotecontent, remotemetadata
84 84
85 85
86 86 def makepackstores(repo):
87 87 """Packs are more efficient (to read from) cache stores."""
88 88 # Instantiate pack stores
89 89 packpath = shallowutil.getcachepackpath(repo, constants.FILEPACK_CATEGORY)
90 90 packcontentstore = datapack.datapackstore(repo.ui, packpath)
91 91 packmetadatastore = historypack.historypackstore(repo.ui, packpath)
92 92
93 93 repo.shareddatastores.append(packcontentstore)
94 94 repo.sharedhistorystores.append(packmetadatastore)
95 95 shallowutil.reportpackmetrics(
96 96 repo.ui, b'filestore', packcontentstore, packmetadatastore
97 97 )
98 98 return packcontentstore, packmetadatastore
99 99
100 100
101 101 def makeunionstores(repo):
102 102 """Union stores iterate the other stores and return the first result."""
103 103 repo.shareddatastores = []
104 104 repo.sharedhistorystores = []
105 105
106 106 packcontentstore, packmetadatastore = makepackstores(repo)
107 107 cachecontent, cachemetadata = makecachestores(repo)
108 108 localcontent, localmetadata = makelocalstores(repo)
109 109 remotecontent, remotemetadata = makeremotestores(
110 110 repo, cachecontent, cachemetadata
111 111 )
112 112
113 113 # Instantiate union stores
114 114 repo.contentstore = contentstore.unioncontentstore(
115 115 packcontentstore,
116 116 cachecontent,
117 117 localcontent,
118 118 remotecontent,
119 119 writestore=localcontent,
120 120 )
121 121 repo.metadatastore = metadatastore.unionmetadatastore(
122 122 packmetadatastore,
123 123 cachemetadata,
124 124 localmetadata,
125 125 remotemetadata,
126 126 writestore=localmetadata,
127 127 )
128 128
129 129 fileservicedatawrite = cachecontent
130 130 fileservicehistorywrite = cachemetadata
131 131 repo.fileservice.setstore(
132 132 repo.contentstore,
133 133 repo.metadatastore,
134 134 fileservicedatawrite,
135 135 fileservicehistorywrite,
136 136 )
137 137 shallowutil.reportpackmetrics(
138 138 repo.ui, b'filestore', packcontentstore, packmetadatastore
139 139 )
140 140
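A brief editorial note on the layering the function above wires up (no new API, just the lookup order as passed to unioncontentstore/unionmetadatastore):

# Read path, first hit wins:
#   1. pack store         (shared cache, pack files: most efficient to read)
#   2. loose cache store  (machine-wide cache of remote data, discardable)
#   3. local store        (.hg/store/data inside the repo, not discardable)
#   4. remote store       (fetches from the fallback server on a miss)
# Writes go to the local store (writestore=localcontent / localmetadata).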
141 141
142 142 def wraprepo(repo):
143 143 class shallowrepository(repo.__class__):
144 144 @util.propertycache
145 145 def name(self):
146 146 return self.ui.config(b'remotefilelog', b'reponame')
147 147
148 148 @util.propertycache
149 149 def fallbackpath(self):
150 150 path = repo.ui.config(
151 151 b"remotefilelog",
152 152 b"fallbackpath",
153 153 repo.ui.config(b'paths', b'default'),
154 154 )
155 155 if not path:
156 156 raise error.Abort(
157 157 b"no remotefilelog server "
158 158 b"configured - is your .hg/hgrc trusted?"
159 159 )
160 160
161 161 return path
162 162
163 163 def maybesparsematch(self, *revs, **kwargs):
164 164 '''
165 165 A wrapper that allows the remotefilelog to invoke sparsematch() if
166 166 this is a sparse repository, or returns None if this is not a
167 167 sparse repository.
168 168 '''
169 169 if revs:
170 170 ret = sparse.matcher(repo, revs=revs)
171 171 else:
172 172 ret = sparse.matcher(repo)
173 173
174 174 if ret.always():
175 175 return None
176 176 return ret
177 177
178 178 def file(self, f):
179 179 if f[0] == b'/':
180 180 f = f[1:]
181 181
182 182 if self.shallowmatch(f):
183 183 return remotefilelog.remotefilelog(self.svfs, f, self)
184 184 else:
185 185 return super(shallowrepository, self).file(f)
186 186
187 187 def filectx(self, path, *args, **kwargs):
188 188 if self.shallowmatch(path):
189 189 return remotefilectx.remotefilectx(self, path, *args, **kwargs)
190 190 else:
191 191 return super(shallowrepository, self).filectx(
192 192 path, *args, **kwargs
193 193 )
194 194
195 195 @localrepo.unfilteredmethod
196 196 def commitctx(self, ctx, error=False, origctx=None):
197 197 """Add a new revision to current repository.
198 198 Revision information is passed via the context argument.
199 199 """
200 200
201 201 # some contexts already have manifest nodes; they don't need any
202 202 # prefetching (for example, if we're just editing a commit message
203 203 # we can reuse the manifest)
204 204 if not ctx.manifestnode():
205 205 # prefetch files that will likely be compared
206 206 m1 = ctx.p1().manifest()
207 207 files = []
208 208 for f in ctx.modified() + ctx.added():
209 209 fparent1 = m1.get(f, nullid)
210 210 if fparent1 != nullid:
211 211 files.append((f, hex(fparent1)))
212 212 self.fileservice.prefetch(files)
213 213 return super(shallowrepository, self).commitctx(
214 214 ctx, error=error, origctx=origctx
215 215 )
216 216
217 217 def backgroundprefetch(
218 218 self,
219 219 revs,
220 220 base=None,
221 221 repack=False,
222 222 pats=None,
223 223 opts=None,
224 224 ensurestart=False,
225 225 ):
226 226 """Runs prefetch in background with optional repack
227 227 """
228 228 cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'prefetch']
229 229 if repack:
230 230 cmd.append(b'--repack')
231 231 if revs:
232 232 cmd += [b'-r', revs]
233 233 # We know this command will find a binary, so don't block
234 234 # on it starting.
235 kwargs = {}
236 if repo.ui.configbool(b'devel', b'remotefilelog.bg-wait'):
237 kwargs['record_wait'] = repo.ui.atexit
238
235 239 procutil.runbgcommand(
236 cmd, encoding.environ, ensurestart=ensurestart
240 cmd, encoding.environ, ensurestart=ensurestart, **kwargs
237 241 )
238 242
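The hunk above is the change this commit introduces: when the devel knob is set, ui.atexit is passed to runbgcommand as record_wait, so the wait on the background prefetch is recorded and performed when the command exits. A minimal sketch of enabling it, e.g. from a test hgrc (the section and option name follow the configbool call above; using it in an hgrc is an assumption about typical usage):

[devel]
remotefilelog.bg-wait = True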
239 243 def prefetch(self, revs, base=None, pats=None, opts=None):
240 244 """Prefetches all the necessary file revisions for the given revs
241 245 Optionally runs repack in background
242 246 """
243 247 with repo._lock(
244 248 repo.svfs,
245 249 b'prefetchlock',
246 250 True,
247 251 None,
248 252 None,
249 253 _(b'prefetching in %s') % repo.origroot,
250 254 ):
251 255 self._prefetch(revs, base, pats, opts)
252 256
253 257 def _prefetch(self, revs, base=None, pats=None, opts=None):
254 258 fallbackpath = self.fallbackpath
255 259 if fallbackpath:
256 260 # If we know a rev is on the server, we should fetch the server
257 261 # version of those files, since our local file versions might
258 262 # become obsolete if the local commits are stripped.
259 263 localrevs = repo.revs(b'outgoing(%s)', fallbackpath)
260 264 if base is not None and base != nullrev:
261 265 serverbase = list(
262 266 repo.revs(
263 267 b'first(reverse(::%s) - %ld)', base, localrevs
264 268 )
265 269 )
266 270 if serverbase:
267 271 base = serverbase[0]
268 272 else:
269 273 localrevs = repo
270 274
271 275 mfl = repo.manifestlog
272 276 mfrevlog = mfl.getstorage(b'')
273 277 if base is not None:
274 278 mfdict = mfl[repo[base].manifestnode()].read()
275 279 skip = set(pycompat.iteritems(mfdict))
276 280 else:
277 281 skip = set()
278 282
279 283 # Copy the skip set so we start large and avoid constant resizing,
280 284 # since it's likely to be very similar to the prefetch set.
281 285 files = skip.copy()
282 286 serverfiles = skip.copy()
283 287 visited = set()
284 288 visited.add(nullrev)
285 289 revcount = len(revs)
286 290 progress = self.ui.makeprogress(_(b'prefetching'), total=revcount)
287 291 progress.update(0)
288 292 for rev in sorted(revs):
289 293 ctx = repo[rev]
290 294 if pats:
291 295 m = scmutil.match(ctx, pats, opts)
292 296 sparsematch = repo.maybesparsematch(rev)
293 297
294 298 mfnode = ctx.manifestnode()
295 299 mfrev = mfrevlog.rev(mfnode)
296 300
297 301 # Decompressing manifests is expensive.
298 302 # When possible, only read the deltas.
299 303 p1, p2 = mfrevlog.parentrevs(mfrev)
300 304 if p1 in visited and p2 in visited:
301 305 mfdict = mfl[mfnode].readfast()
302 306 else:
303 307 mfdict = mfl[mfnode].read()
304 308
305 309 diff = pycompat.iteritems(mfdict)
306 310 if pats:
307 311 diff = (pf for pf in diff if m(pf[0]))
308 312 if sparsematch:
309 313 diff = (pf for pf in diff if sparsematch(pf[0]))
310 314 if rev not in localrevs:
311 315 serverfiles.update(diff)
312 316 else:
313 317 files.update(diff)
314 318
315 319 visited.add(mfrev)
316 320 progress.increment()
317 321
318 322 files.difference_update(skip)
319 323 serverfiles.difference_update(skip)
320 324 progress.complete()
321 325
322 326 # Fetch files known to be on the server
323 327 if serverfiles:
324 328 results = [(path, hex(fnode)) for (path, fnode) in serverfiles]
325 329 repo.fileservice.prefetch(results, force=True)
326 330
327 331 # Fetch files that may or may not be on the server
328 332 if files:
329 333 results = [(path, hex(fnode)) for (path, fnode) in files]
330 334 repo.fileservice.prefetch(results)
331 335
332 336 def close(self):
333 337 super(shallowrepository, self).close()
334 338 self.connectionpool.close()
335 339
336 340 repo.__class__ = shallowrepository
337 341
338 342 repo.shallowmatch = match.always()
339 343
340 344 makeunionstores(repo)
341 345
342 346 repo.includepattern = repo.ui.configlist(
343 347 b"remotefilelog", b"includepattern", None
344 348 )
345 349 repo.excludepattern = repo.ui.configlist(
346 350 b"remotefilelog", b"excludepattern", None
347 351 )
348 352 if not util.safehasattr(repo, 'connectionpool'):
349 353 repo.connectionpool = connectionpool.connectionpool(repo)
350 354
351 355 if repo.includepattern or repo.excludepattern:
352 356 repo.shallowmatch = match.match(
353 357 repo.root, b'', None, repo.includepattern, repo.excludepattern
354 358 )