remotefilelog: move most functions in onetimeclientsetup() to top level...
Martin von Zweigbergk - r42459:651f325e default
@@ -1,1111 +1,1124 b''
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 the minimum number of pack files must be > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list specifying the lower bound of
52 52 each generation of the data pack files. For example, the list
53 53 ['100MB', '1MB'] (in either order) will lead to three generations:
54 54 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, the minimum number of pack files must be > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list specifying the lower bound of
70 70 each generation of the history pack files. For example, the list
71 71 ['100MB', '1MB'] (in either order) will lead to three generations:
72 72 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning`` message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial import (
136 136 changegroup,
137 137 changelog,
138 138 cmdutil,
139 139 commands,
140 140 configitems,
141 141 context,
142 142 copies,
143 143 debugcommands as hgdebugcommands,
144 144 dispatch,
145 145 error,
146 146 exchange,
147 147 extensions,
148 148 hg,
149 149 localrepo,
150 150 match,
151 151 merge,
152 152 node as nodemod,
153 153 patch,
154 154 pycompat,
155 155 registrar,
156 156 repair,
157 157 repoview,
158 158 revset,
159 159 scmutil,
160 160 smartset,
161 161 streamclone,
162 162 util,
163 163 )
164 164 from . import (
165 165 constants,
166 166 debugcommands,
167 167 fileserverclient,
168 168 remotefilectx,
169 169 remotefilelog,
170 170 remotefilelogserver,
171 171 repack as repackmod,
172 172 shallowbundle,
173 173 shallowrepo,
174 174 shallowstore,
175 175 shallowutil,
176 176 shallowverifier,
177 177 )
178 178
179 179 # ensures debug commands are registered
180 180 hgdebugcommands.command
181 181
182 182 cmdtable = {}
183 183 command = registrar.command(cmdtable)
184 184
185 185 configtable = {}
186 186 configitem = registrar.configitem(configtable)
187 187
188 188 configitem('remotefilelog', 'debug', default=False)
189 189
190 190 configitem('remotefilelog', 'reponame', default='')
191 191 configitem('remotefilelog', 'cachepath', default=None)
192 192 configitem('remotefilelog', 'cachegroup', default=None)
193 193 configitem('remotefilelog', 'cacheprocess', default=None)
194 194 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
195 195 configitem("remotefilelog", "cachelimit", default="1000 GB")
196 196
197 197 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
198 198 alias=[('remotefilelog', 'fallbackrepo')])
199 199
200 200 configitem('remotefilelog', 'validatecachelog', default=None)
201 201 configitem('remotefilelog', 'validatecache', default='on')
202 202 configitem('remotefilelog', 'server', default=None)
203 203 configitem('remotefilelog', 'servercachepath', default=None)
204 204 configitem("remotefilelog", "serverexpiration", default=30)
205 205 configitem('remotefilelog', 'backgroundrepack', default=False)
206 206 configitem('remotefilelog', 'bgprefetchrevs', default=None)
207 207 configitem('remotefilelog', 'pullprefetch', default=None)
208 208 configitem('remotefilelog', 'backgroundprefetch', default=False)
209 209 configitem('remotefilelog', 'prefetchdelay', default=120)
210 210 configitem('remotefilelog', 'prefetchdays', default=14)
211 211
212 212 configitem('remotefilelog', 'getfilesstep', default=10000)
213 213 configitem('remotefilelog', 'getfilestype', default='optimistic')
214 214 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
215 215 configitem('remotefilelog', 'fetchwarning', default='')
216 216
217 217 configitem('remotefilelog', 'includepattern', default=None)
218 218 configitem('remotefilelog', 'excludepattern', default=None)
219 219
220 220 configitem('remotefilelog', 'gcrepack', default=False)
221 221 configitem('remotefilelog', 'repackonhggc', default=False)
222 222 configitem('repack', 'chainorphansbysize', default=True)
223 223
224 224 configitem('packs', 'maxpacksize', default=0)
225 225 configitem('packs', 'maxchainlen', default=1000)
226 226
227 227 # default TTL limit is 30 days
228 228 _defaultlimit = 60 * 60 * 24 * 30
229 229 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
230 230
231 231 configitem('remotefilelog', 'data.gencountlimit', default=2)
232 232 configitem('remotefilelog', 'data.generations',
233 233 default=['1GB', '100MB', '1MB'])
234 234 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
235 235 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
236 236 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
237 237
238 238 configitem('remotefilelog', 'history.gencountlimit', default=2)
239 239 configitem('remotefilelog', 'history.generations', default=['100MB'])
240 240 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
241 241 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
242 242 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
243 243
244 244 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
245 245 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
246 246 # be specifying the version(s) of Mercurial they are tested with, or
247 247 # leave the attribute unspecified.
248 248 testedwith = 'ships-with-hg-core'
249 249
250 250 repoclass = localrepo.localrepository
251 251 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
252 252
253 253 isenabled = shallowutil.isenabled
254 254
255 255 def uisetup(ui):
256 256 """Wraps user facing Mercurial commands to swap them out with shallow
257 257 versions.
258 258 """
259 259 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
260 260
261 261 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
262 262 entry[1].append(('', 'shallow', None,
263 263 _("create a shallow clone which uses remote file "
264 264 "history")))
265 265
266 266 extensions.wrapcommand(commands.table, 'debugindex',
267 267 debugcommands.debugindex)
268 268 extensions.wrapcommand(commands.table, 'debugindexdot',
269 269 debugcommands.debugindexdot)
270 270 extensions.wrapcommand(commands.table, 'log', log)
271 271 extensions.wrapcommand(commands.table, 'pull', pull)
272 272
273 273 # Prevent 'hg manifest --all'
274 274 def _manifest(orig, ui, repo, *args, **opts):
275 275 if (isenabled(repo) and opts.get(r'all')):
276 276 raise error.Abort(_("--all is not supported in a shallow repo"))
277 277
278 278 return orig(ui, repo, *args, **opts)
279 279 extensions.wrapcommand(commands.table, "manifest", _manifest)
280 280
281 281 # Wrap remotefilelog with lfs code
282 282 def _lfsloaded(loaded=False):
283 283 lfsmod = None
284 284 try:
285 285 lfsmod = extensions.find('lfs')
286 286 except KeyError:
287 287 pass
288 288 if lfsmod:
289 289 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
290 290 fileserverclient._lfsmod = lfsmod
291 291 extensions.afterloaded('lfs', _lfsloaded)
292 292
293 293 # debugdata needs remotefilelog.len to work
294 294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
295 295
296 296 def cloneshallow(orig, ui, repo, *args, **opts):
297 297 if opts.get(r'shallow'):
298 298 repos = []
299 299 def pull_shallow(orig, self, *args, **kwargs):
300 300 if not isenabled(self):
301 301 repos.append(self.unfiltered())
302 302 # set up the client hooks so the post-clone update works
303 303 setupclient(self.ui, self.unfiltered())
304 304
305 305 # setupclient fixed the class on the repo itself
306 306 # but we also need to fix it on the repoview
307 307 if isinstance(self, repoview.repoview):
308 308 self.__class__.__bases__ = (self.__class__.__bases__[0],
309 309 self.unfiltered().__class__)
310 310 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
311 311 self._writerequirements()
312 312
313 313 # Since setupclient hadn't been called, exchange.pull was not
314 314 # wrapped. So we need to manually invoke our version of it.
315 315 return exchangepull(orig, self, *args, **kwargs)
316 316 else:
317 317 return orig(self, *args, **kwargs)
318 318 extensions.wrapfunction(exchange, 'pull', pull_shallow)
319 319
320 320 # Wrap the stream logic to add requirements and to pass include/exclude
321 321 # patterns around.
322 322 def setup_streamout(repo, remote):
323 323 # Replace remote.stream_out with a version that sends file
324 324 # patterns.
325 325 def stream_out_shallow(orig):
326 326 caps = remote.capabilities()
327 327 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
328 328 opts = {}
329 329 if repo.includepattern:
330 330 opts[r'includepattern'] = '\0'.join(repo.includepattern)
331 331 if repo.excludepattern:
332 332 opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
333 333 return remote._callstream('stream_out_shallow', **opts)
334 334 else:
335 335 return orig()
336 336 extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
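# Illustrative example: if repo.includepattern were ['foo/*', 'bar/*'],
# the NUL-joined value sent over the wire would be 'foo/*\0bar/*', so the
# call above becomes roughly:
#   remote._callstream('stream_out_shallow', includepattern='foo/*\0bar/*')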
337 337 def stream_wrap(orig, op):
338 338 setup_streamout(op.repo, op.remote)
339 339 return orig(op)
340 340 extensions.wrapfunction(
341 341 streamclone, 'maybeperformlegacystreamclone', stream_wrap)
342 342
343 343 def canperformstreamclone(orig, pullop, bundle2=False):
344 344 # remotefilelog is currently incompatible with the
345 345 # bundle2 flavor of streamclones, so force us to use
346 346 # v1 instead.
347 347 if 'v2' in pullop.remotebundle2caps.get('stream', []):
348 348 pullop.remotebundle2caps['stream'] = [
349 349 c for c in pullop.remotebundle2caps['stream']
350 350 if c != 'v2']
351 351 if bundle2:
352 352 return False, None
353 353 supported, requirements = orig(pullop, bundle2=bundle2)
354 354 if requirements is not None:
355 355 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
356 356 return supported, requirements
357 357 extensions.wrapfunction(
358 358 streamclone, 'canperformstreamclone', canperformstreamclone)
359 359
360 360 try:
361 361 orig(ui, repo, *args, **opts)
362 362 finally:
363 363 if opts.get(r'shallow'):
364 364 for r in repos:
365 365 if util.safehasattr(r, 'fileservice'):
366 366 r.fileservice.close()
367 367
368 368 def debugdatashallow(orig, *args, **kwds):
369 369 oldlen = remotefilelog.remotefilelog.__len__
370 370 try:
371 371 remotefilelog.remotefilelog.__len__ = lambda x: 1
372 372 return orig(*args, **kwds)
373 373 finally:
374 374 remotefilelog.remotefilelog.__len__ = oldlen
375 375
376 376 def reposetup(ui, repo):
377 377 if not repo.local():
378 378 return
379 379
380 380 # put here intentionally because this doesn't work in uisetup
381 381 ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
382 382 ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
383 383
384 384 isserverenabled = ui.configbool('remotefilelog', 'server')
385 385 isshallowclient = isenabled(repo)
386 386
387 387 if isserverenabled and isshallowclient:
388 388 raise RuntimeError("Cannot be both a server and shallow client.")
389 389
390 390 if isshallowclient:
391 391 setupclient(ui, repo)
392 392
393 393 if isserverenabled:
394 394 remotefilelogserver.setupserver(ui, repo)
395 395
396 396 def setupclient(ui, repo):
397 397 if not isinstance(repo, localrepo.localrepository):
398 398 return
399 399
400 400 # Even clients get the server setup since they need to have the
401 401 # wireprotocol endpoints registered.
402 402 remotefilelogserver.onetimesetup(ui)
403 403 onetimeclientsetup(ui)
404 404
405 405 shallowrepo.wraprepo(repo)
406 406 repo.store = shallowstore.wrapstore(repo.store)
407 407
408 def storewrapper(orig, requirements, path, vfstype):
409 s = orig(requirements, path, vfstype)
410 if constants.SHALLOWREPO_REQUIREMENT in requirements:
411 s = shallowstore.wrapstore(s)
412
413 return s
414
415 # prefetch files before update
416 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
417 if isenabled(repo):
418 manifest = mctx.manifest()
419 files = []
420 for f, args, msg in actions['g']:
421 files.append((f, hex(manifest[f])))
422 # batch fetch the needed files from the server
423 repo.fileservice.prefetch(files)
424 return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
425
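# The wrappers here and below all follow the same batching pattern: build a
# list of (path, hex filenode) pairs and hand it to
# repo.fileservice.prefetch() so files arrive in one request instead of one
# fetch per file. A sketch with made-up values:
#   files = [('src/a.py', 'd1f0c5...'), ('src/b.py', '9ab4e2...')]
#   repo.fileservice.prefetch(files)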
426 # Prefetch merge checkunknownfiles
427 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
428 *args, **kwargs):
429 if isenabled(repo):
430 files = []
431 sparsematch = repo.maybesparsematch(mctx.rev())
432 for f, (m, actionargs, msg) in actions.iteritems():
433 if sparsematch and not sparsematch(f):
434 continue
435 if m in ('c', 'dc', 'cm'):
436 files.append((f, hex(mctx.filenode(f))))
437 elif m == 'dg':
438 f2 = actionargs[0]
439 files.append((f2, hex(mctx.filenode(f2))))
440 # batch fetch the needed files from the server
441 repo.fileservice.prefetch(files)
442 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
443
444 # Prefetch files before status attempts to look at their size and contents
445 def checklookup(orig, self, files):
446 repo = self._repo
447 if isenabled(repo):
448 prefetchfiles = []
449 for parent in self._parents:
450 for f in files:
451 if f in parent:
452 prefetchfiles.append((f, hex(parent.filenode(f))))
453 # batch fetch the needed files from the server
454 repo.fileservice.prefetch(prefetchfiles)
455 return orig(self, files)
456
457 # Prefetch files needed by the logic that compares added and removed files for renames
458 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
459 if isenabled(repo):
460 files = []
461 pmf = repo['.'].manifest()
462 for f in removed:
463 if f in pmf:
464 files.append((f, hex(pmf[f])))
465 # batch fetch the needed files from the server
466 repo.fileservice.prefetch(files)
467 return orig(repo, matcher, added, removed, *args, **kwargs)
468
469 # prefetch files before pathcopies check
470 def computeforwardmissing(orig, a, b, match=None):
471 missing = orig(a, b, match=match)
472 repo = a._repo
473 if isenabled(repo):
474 mb = b.manifest()
475
476 files = []
477 sparsematch = repo.maybesparsematch(b.rev())
478 if sparsematch:
479 sparsemissing = set()
480 for f in missing:
481 if sparsematch(f):
482 files.append((f, hex(mb[f])))
483 sparsemissing.add(f)
484 missing = sparsemissing
485
486 # batch fetch the needed files from the server
487 repo.fileservice.prefetch(files)
488 return missing
489
490 # close cache miss server connection after the command has finished
491 def runcommand(orig, lui, repo, *args, **kwargs):
492 fileservice = None
493 # repo can be None when running in chg:
494 # - at startup, reposetup was called because serve is not norepo
495 # - a norepo command like "help" is called
496 if repo and isenabled(repo):
497 fileservice = repo.fileservice
498 try:
499 return orig(lui, repo, *args, **kwargs)
500 finally:
501 if fileservice:
502 fileservice.close()
503
504 # prevent strip from stripping remotefilelogs
505 def _collectbrokencsets(orig, repo, files, striprev):
506 if isenabled(repo):
507 files = list([f for f in files if not repo.shallowmatch(f)])
508 return orig(repo, files, striprev)
509
510 # changectx wrappers
511 def filectx(orig, self, path, fileid=None, filelog=None):
512 if fileid is None:
513 fileid = self.filenode(path)
514 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
515 return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
516 changectx=self, filelog=filelog)
517 return orig(self, path, fileid=fileid, filelog=filelog)
518
519 def workingfilectx(orig, self, path, filelog=None):
520 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
521 return remotefilectx.remoteworkingfilectx(self._repo, path,
522 workingctx=self,
523 filelog=filelog)
524 return orig(self, path, filelog=filelog)
525
526 # prefetch required revisions before a diff
527 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
528 copy, getfilectx, *args, **kwargs):
529 if isenabled(repo):
530 prefetch = []
531 mf1 = ctx1.manifest()
532 for fname in modified + added + removed:
533 if fname in mf1:
534 fnode = getfilectx(fname, ctx1).filenode()
535 # fnode can be None if it's an edited working ctx file
536 if fnode:
537 prefetch.append((fname, hex(fnode)))
538 if fname not in removed:
539 fnode = getfilectx(fname, ctx2).filenode()
540 if fnode:
541 prefetch.append((fname, hex(fnode)))
542
543 repo.fileservice.prefetch(prefetch)
544
545 return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
546 getfilectx, *args, **kwargs)
547
548 # Prevent verify from processing files
549 # a stub for mercurial.hg.verify()
550 def _verify(orig, repo, level=None):
551 lock = repo.lock()
552 try:
553 return shallowverifier.shallowverifier(repo).verify()
554 finally:
555 lock.release()
556
557
408 558 clientonetime = False
409 559 def onetimeclientsetup(ui):
410 560 global clientonetime
411 561 if clientonetime:
412 562 return
413 563 clientonetime = True
414 564
415 565 changegroup.cgpacker = shallowbundle.shallowcg1packer
416 566
417 567 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
418 568 shallowbundle.addchangegroupfiles)
419 569 extensions.wrapfunction(
420 570 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
421 571
422 def storewrapper(orig, requirements, path, vfstype):
423 s = orig(requirements, path, vfstype)
424 if constants.SHALLOWREPO_REQUIREMENT in requirements:
425 s = shallowstore.wrapstore(s)
426
427 return s
428 572 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
429 573
430 574 extensions.wrapfunction(exchange, 'pull', exchangepull)
431 575
432 # prefetch files before update
433 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
434 if isenabled(repo):
435 manifest = mctx.manifest()
436 files = []
437 for f, args, msg in actions['g']:
438 files.append((f, hex(manifest[f])))
439 # batch fetch the needed files from the server
440 repo.fileservice.prefetch(files)
441 return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
442 576 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
443 577
444 # Prefetch merge checkunknownfiles
445 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
446 *args, **kwargs):
447 if isenabled(repo):
448 files = []
449 sparsematch = repo.maybesparsematch(mctx.rev())
450 for f, (m, actionargs, msg) in actions.iteritems():
451 if sparsematch and not sparsematch(f):
452 continue
453 if m in ('c', 'dc', 'cm'):
454 files.append((f, hex(mctx.filenode(f))))
455 elif m == 'dg':
456 f2 = actionargs[0]
457 files.append((f2, hex(mctx.filenode(f2))))
458 # batch fetch the needed files from the server
459 repo.fileservice.prefetch(files)
460 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
461 578 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
462 579
463 # Prefetch files before status attempts to look at their size and contents
464 def checklookup(orig, self, files):
465 repo = self._repo
466 if isenabled(repo):
467 prefetchfiles = []
468 for parent in self._parents:
469 for f in files:
470 if f in parent:
471 prefetchfiles.append((f, hex(parent.filenode(f))))
472 # batch fetch the needed files from the server
473 repo.fileservice.prefetch(prefetchfiles)
474 return orig(self, files)
475 580 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
476 581
477 # Prefetch files needed by the logic that compares added and removed files for renames
478 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
479 if isenabled(repo):
480 files = []
481 pmf = repo['.'].manifest()
482 for f in removed:
483 if f in pmf:
484 files.append((f, hex(pmf[f])))
485 # batch fetch the needed files from the server
486 repo.fileservice.prefetch(files)
487 return orig(repo, matcher, added, removed, *args, **kwargs)
488 582 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
489 583
490 # prefetch files before pathcopies check
491 def computeforwardmissing(orig, a, b, match=None):
492 missing = orig(a, b, match=match)
493 repo = a._repo
494 if isenabled(repo):
495 mb = b.manifest()
496
497 files = []
498 sparsematch = repo.maybesparsematch(b.rev())
499 if sparsematch:
500 sparsemissing = set()
501 for f in missing:
502 if sparsematch(f):
503 files.append((f, hex(mb[f])))
504 sparsemissing.add(f)
505 missing = sparsemissing
506
507 # batch fetch the needed files from the server
508 repo.fileservice.prefetch(files)
509 return missing
510 584 extensions.wrapfunction(copies, '_computeforwardmissing',
511 585 computeforwardmissing)
512 586
513 # close cache miss server connection after the command has finished
514 def runcommand(orig, lui, repo, *args, **kwargs):
515 fileservice = None
516 # repo can be None when running in chg:
517 # - at startup, reposetup was called because serve is not norepo
518 # - a norepo command like "help" is called
519 if repo and isenabled(repo):
520 fileservice = repo.fileservice
521 try:
522 return orig(lui, repo, *args, **kwargs)
523 finally:
524 if fileservice:
525 fileservice.close()
526 587 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
527 588
528 589 # disappointing hacks below
529 590 scmutil.getrenamedfn = getrenamedfn
530 591 extensions.wrapfunction(revset, 'filelog', filelogrevset)
531 592 revset.symbols['filelog'] = revset.filelog
532 593 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
533 594
534 # prevent strip from stripping remotefilelogs
535 def _collectbrokencsets(orig, repo, files, striprev):
536 if isenabled(repo):
537 files = list([f for f in files if not repo.shallowmatch(f)])
538 return orig(repo, files, striprev)
539 595 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
540 596
541 597 # Don't commit filelogs until we know the commit hash, since the hash
542 598 # is present in the filelog blob.
543 599 # This violates Mercurial's filelog->manifest->changelog write order,
544 600 # but is generally fine for client repos.
545 601 pendingfilecommits = []
546 602 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
547 603 flags, cachedelta=None, _metatuple=None):
548 604 if isinstance(link, int):
549 605 pendingfilecommits.append(
550 606 (self, rawtext, transaction, link, p1, p2, node, flags,
551 607 cachedelta, _metatuple))
552 608 return node
553 609 else:
554 610 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
555 611 cachedelta, _metatuple=_metatuple)
556 612 extensions.wrapfunction(
557 613 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
558 614
559 615 def changelogadd(orig, self, *args):
560 616 oldlen = len(self)
561 617 node = orig(self, *args)
562 618 newlen = len(self)
563 619 if oldlen != newlen:
564 620 for oldargs in pendingfilecommits:
565 621 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
566 622 linknode = self.node(link)
567 623 if linknode == node:
568 624 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
569 625 else:
570 626 raise error.ProgrammingError(
571 627 'pending multiple integer revisions are not supported')
572 628 else:
573 629 # "link" is actually wrong here (it is set to len(changelog))
574 630 # if changelog remains unchanged, skip writing file revisions
575 631 # but still do a sanity check about pending multiple revisions
576 632 if len(set(x[3] for x in pendingfilecommits)) > 1:
577 633 raise error.ProgrammingError(
578 634 'pending multiple integer revisions are not supported')
579 635 del pendingfilecommits[:]
580 636 return node
581 637 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
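# A sketch of the deferred-write flow implemented above (the scenario is
# illustrative):
#   1. during commit, remotefilelog.addrawrevision() is called with link set
#      to len(changelog), an int, so its arguments are queued in
#      pendingfilecommits and nothing is written yet;
#   2. changelog.add() then creates the revision; changelogadd() notices the
#      length change, resolves each queued integer link to the new commit
#      node via self.node(link), and replays the queued addrawrevision()
#      calls with that real linknode.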
582 638
583 # changectx wrappers
584 def filectx(orig, self, path, fileid=None, filelog=None):
585 if fileid is None:
586 fileid = self.filenode(path)
587 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
588 return remotefilectx.remotefilectx(self._repo, path,
589 fileid=fileid, changectx=self, filelog=filelog)
590 return orig(self, path, fileid=fileid, filelog=filelog)
591 639 extensions.wrapfunction(context.changectx, 'filectx', filectx)
592 640
593 def workingfilectx(orig, self, path, filelog=None):
594 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
595 return remotefilectx.remoteworkingfilectx(self._repo,
596 path, workingctx=self, filelog=filelog)
597 return orig(self, path, filelog=filelog)
598 641 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
599 642
600 # prefetch required revisions before a diff
601 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
602 copy, getfilectx, *args, **kwargs):
603 if isenabled(repo):
604 prefetch = []
605 mf1 = ctx1.manifest()
606 for fname in modified + added + removed:
607 if fname in mf1:
608 fnode = getfilectx(fname, ctx1).filenode()
609 # fnode can be None if it's an edited working ctx file
610 if fnode:
611 prefetch.append((fname, hex(fnode)))
612 if fname not in removed:
613 fnode = getfilectx(fname, ctx2).filenode()
614 if fnode:
615 prefetch.append((fname, hex(fnode)))
616
617 repo.fileservice.prefetch(prefetch)
618
619 return orig(repo, revs, ctx1, ctx2, modified, added, removed,
620 copy, getfilectx, *args, **kwargs)
621 643 extensions.wrapfunction(patch, 'trydiff', trydiff)
622 644
623 # Prevent verify from processing files
624 # a stub for mercurial.hg.verify()
625 def _verify(orig, repo, level=None):
626 lock = repo.lock()
627 try:
628 return shallowverifier.shallowverifier(repo).verify()
629 finally:
630 lock.release()
631
632 645 extensions.wrapfunction(hg, 'verify', _verify)
633 646
634 647 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
635 648
636 649 def getrenamedfn(repo, endrev=None):
637 650 rcache = {}
638 651
639 652 def getrenamed(fn, rev):
640 653 '''looks up all renames for a file (up to endrev) the first
641 654 time the file is given. It indexes on the changerev and only
642 655 parses the manifest if linkrev != changerev.
643 656 Returns rename info for fn at changerev rev.'''
644 657 if rev in rcache.setdefault(fn, {}):
645 658 return rcache[fn][rev]
646 659
647 660 try:
648 661 fctx = repo[rev].filectx(fn)
649 662 for ancestor in fctx.ancestors():
650 663 if ancestor.path() == fn:
651 664 renamed = ancestor.renamed()
652 665 rcache[fn][ancestor.rev()] = renamed and renamed[0]
653 666
654 667 renamed = fctx.renamed()
655 668 return renamed and renamed[0]
656 669 except error.LookupError:
657 670 return None
658 671
659 672 return getrenamed
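# Hypothetical usage of the returned closure: if 'b.txt' was copied from
# 'a.txt' in revision 5, getrenamed('b.txt', 5) returns 'a.txt'; for a file
# revision with no rename information it returns a false value.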
660 673
661 674 def walkfilerevs(orig, repo, match, follow, revs, fncache):
662 675 if not isenabled(repo):
663 676 return orig(repo, match, follow, revs, fncache)
664 677
665 678 # remotefilelogs can't be walked in rev order, so throw.
666 679 # The caller will see the exception and walk the commit tree instead.
667 680 if not follow:
668 681 raise cmdutil.FileWalkError("Cannot walk via filelog")
669 682
670 683 wanted = set()
671 684 minrev, maxrev = min(revs), max(revs)
672 685
673 686 pctx = repo['.']
674 687 for filename in match.files():
675 688 if filename not in pctx:
676 689 raise error.Abort(_('cannot follow file not in parent '
677 690 'revision: "%s"') % filename)
678 691 fctx = pctx[filename]
679 692
680 693 linkrev = fctx.linkrev()
681 694 if linkrev >= minrev and linkrev <= maxrev:
682 695 fncache.setdefault(linkrev, []).append(filename)
683 696 wanted.add(linkrev)
684 697
685 698 for ancestor in fctx.ancestors():
686 699 linkrev = ancestor.linkrev()
687 700 if linkrev >= minrev and linkrev <= maxrev:
688 701 fncache.setdefault(linkrev, []).append(ancestor.path())
689 702 wanted.add(linkrev)
690 703
691 704 return wanted
692 705
693 706 def filelogrevset(orig, repo, subset, x):
694 707 """``filelog(pattern)``
695 708 Changesets connected to the specified filelog.
696 709
697 710 For performance reasons, ``filelog()`` does not show every changeset
698 711 that affects the requested file(s). See :hg:`help log` for details. For
699 712 a slower, more accurate result, use ``file()``.
700 713 """
701 714
702 715 if not isenabled(repo):
703 716 return orig(repo, subset, x)
704 717
705 718 # i18n: "filelog" is a keyword
706 719 pat = revset.getstring(x, _("filelog requires a pattern"))
707 720 m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
708 721 ctx=repo[None])
709 722 s = set()
710 723
711 724 if not match.patkind(pat):
712 725 # slow
713 726 for r in subset:
714 727 ctx = repo[r]
715 728 cfiles = ctx.files()
716 729 for f in m.files():
717 730 if f in cfiles:
718 731 s.add(ctx.rev())
719 732 break
720 733 else:
721 734 # partial
722 735 files = (f for f in repo[None] if m(f))
723 736 for f in files:
724 737 fctx = repo[None].filectx(f)
725 738 s.add(fctx.linkrev())
726 739 for actx in fctx.ancestors():
727 740 s.add(actx.linkrev())
728 741
729 742 return smartset.baseset([r for r in subset if r in s])
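# Illustrative query handled by the override above when remotefilelog is
# enabled (the file name is hypothetical):
#   hg log -r 'filelog("mercurial/commands.py")'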
730 743
731 744 @command('gc', [], _('hg gc [REPO...]'), norepo=True)
732 745 def gc(ui, *args, **opts):
733 746 '''garbage collect the client and server filelog caches
734 747 '''
735 748 cachepaths = set()
736 749
737 750 # get the system client cache
738 751 systemcache = shallowutil.getcachepath(ui, allowempty=True)
739 752 if systemcache:
740 753 cachepaths.add(systemcache)
741 754
742 755 # get repo client and server cache
743 756 repopaths = []
744 757 pwd = ui.environ.get('PWD')
745 758 if pwd:
746 759 repopaths.append(pwd)
747 760
748 761 repopaths.extend(args)
749 762 repos = []
750 763 for repopath in repopaths:
751 764 try:
752 765 repo = hg.peer(ui, {}, repopath)
753 766 repos.append(repo)
754 767
755 768 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
756 769 if repocache:
757 770 cachepaths.add(repocache)
758 771 except error.RepoError:
759 772 pass
760 773
761 774 # gc client cache
762 775 for cachepath in cachepaths:
763 776 gcclient(ui, cachepath)
764 777
765 778 # gc server cache
766 779 for repo in repos:
767 780 remotefilelogserver.gcserver(ui, repo._repo)
768 781
769 782 def gcclient(ui, cachepath):
770 783 # get list of repos that use this cache
771 784 repospath = os.path.join(cachepath, 'repos')
772 785 if not os.path.exists(repospath):
773 786 ui.warn(_("no known cache at %s\n") % cachepath)
774 787 return
775 788
776 789 reposfile = open(repospath, 'rb')
777 790 repos = {r[:-1] for r in reposfile.readlines()}
778 791 reposfile.close()
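# The 'repos' file is a newline-separated list of local repository paths,
# one per line, for example (hypothetical paths):
#   /home/alice/repo1
#   /srv/hg/repo2
# It is rewritten below with only the paths that still resolve to valid
# remotefilelog repositories.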
779 792
780 793 # build list of useful files
781 794 validrepos = []
782 795 keepkeys = set()
783 796
784 797 sharedcache = None
785 798 filesrepacked = False
786 799
787 800 count = 0
788 801 progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
789 802 total=len(repos))
790 803 for path in repos:
791 804 progress.update(count)
792 805 count += 1
793 806 try:
794 807 path = ui.expandpath(os.path.normpath(path))
795 808 except TypeError as e:
796 809 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
797 810 traceback.print_exc()
798 811 continue
799 812 try:
800 813 peer = hg.peer(ui, {}, path)
801 814 repo = peer._repo
802 815 except error.RepoError:
803 816 continue
804 817
805 818 validrepos.append(path)
806 819
807 820 # Protect against any repo or config changes that have happened since
808 821 # this repo was added to the repos file. We'd rather this loop succeed
809 822 # and too much be deleted, than the loop fail and nothing gets deleted.
810 823 if not isenabled(repo):
811 824 continue
812 825
813 826 if not util.safehasattr(repo, 'name'):
814 827 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
815 828 continue
816 829
817 830 # If garbage collection on repack and repack on hg gc are enabled
818 831 # then loose files are repacked and garbage collected.
819 832 # Otherwise regular garbage collection is performed.
820 833 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
821 834 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
822 835 if repackonhggc and gcrepack:
823 836 try:
824 837 repackmod.incrementalrepack(repo)
825 838 filesrepacked = True
826 839 continue
827 840 except (IOError, repackmod.RepackAlreadyRunning):
828 841 # If repack cannot be performed due to not enough disk space
829 842 # continue doing garbage collection of loose files w/o repack
830 843 pass
831 844
832 845 reponame = repo.name
833 846 if not sharedcache:
834 847 sharedcache = repo.sharedstore
835 848
836 849 # Compute a keepset which is not garbage collected
837 850 def keyfn(fname, fnode):
838 851 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
839 852 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
840 853
841 854 progress.complete()
842 855
843 856 # write list of valid repos back
844 857 oldumask = os.umask(0o002)
845 858 try:
846 859 reposfile = open(repospath, 'wb')
847 860 reposfile.writelines([("%s\n" % r) for r in validrepos])
848 861 reposfile.close()
849 862 finally:
850 863 os.umask(oldumask)
851 864
852 865 # prune cache
853 866 if sharedcache is not None:
854 867 sharedcache.gc(keepkeys)
855 868 elif not filesrepacked:
856 869 ui.warn(_("warning: no valid repos in repofile\n"))
857 870
858 871 def log(orig, ui, repo, *pats, **opts):
859 872 if not isenabled(repo):
860 873 return orig(ui, repo, *pats, **opts)
861 874
862 875 follow = opts.get(r'follow')
863 876 revs = opts.get(r'rev')
864 877 if pats:
865 878 # Force slowpath for non-follow patterns and follows that start from
866 879 # non-working-copy-parent revs.
867 880 if not follow or revs:
868 881 # This forces the slowpath
869 882 opts[r'removed'] = True
870 883
871 884 # If this is a non-follow log without any revs specified, recommend that
872 885 # the user add -f to speed it up.
873 886 if not follow and not revs:
874 887 match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
875 888 isfile = not match.anypats()
876 889 if isfile:
877 890 for file in match.files():
878 891 if not os.path.isfile(repo.wjoin(file)):
879 892 isfile = False
880 893 break
881 894
882 895 if isfile:
883 896 ui.warn(_("warning: file log can be slow on large repos - " +
884 897 "use -f to speed it up\n"))
885 898
886 899 return orig(ui, repo, *pats, **opts)
887 900
888 901 def revdatelimit(ui, revset):
889 902 """Update revset so that only changesets no older than 'prefetchdays' days
890 903 are included. The default value is set to 14 days. If 'prefetchdays' is set
891 904 to zero or negative value then date restriction is not applied.
892 905 """
893 906 days = ui.configint('remotefilelog', 'prefetchdays')
894 907 if days > 0:
895 908 revset = '(%s) & date(-%s)' % (revset, days)
896 909 return revset
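# For example, assuming the default prefetchdays value of 14:
#   revdatelimit(ui, 'draft()')  ->  '(draft()) & date(-14)'
# With prefetchdays set to zero or less, the revset is returned unchanged.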
897 910
898 911 def readytofetch(repo):
899 912 """Check that enough time has passed since the last background prefetch.
900 913 This only applies to prefetches after operations that change the working
901 914 copy parent. The default delay between background prefetches is 2 minutes.
902 915 """
903 916 timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
904 917 fname = repo.vfs.join('lastprefetch')
905 918
906 919 ready = False
907 920 with open(fname, 'a'):
908 921 # the with construct above is used to avoid race conditions
909 922 modtime = os.path.getmtime(fname)
910 923 if (time.time() - modtime) > timeout:
911 924 os.utime(fname, None)
912 925 ready = True
913 926
914 927 return ready
915 928
916 929 def wcpprefetch(ui, repo, **kwargs):
917 930 """Prefetches in background revisions specified by bgprefetchrevs revset.
918 931 Does background repack if backgroundrepack flag is set in config.
919 932 """
920 933 shallow = isenabled(repo)
921 934 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
922 935 isready = readytofetch(repo)
923 936
924 937 if not (shallow and bgprefetchrevs and isready):
925 938 return
926 939
927 940 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
928 941 # update a revset with a date limit
929 942 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
930 943
931 944 def anon():
932 945 if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
933 946 return
934 947 repo.ranprefetch = True
935 948 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
936 949
937 950 repo._afterlock(anon)
938 951
939 952 def pull(orig, ui, repo, *pats, **opts):
940 953 result = orig(ui, repo, *pats, **opts)
941 954
942 955 if isenabled(repo):
943 956 # prefetch if it's configured
944 957 prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
945 958 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
946 959 bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
947 960
948 961 if prefetchrevset:
949 962 ui.status(_("prefetching file contents\n"))
950 963 revs = scmutil.revrange(repo, [prefetchrevset])
951 964 base = repo['.'].rev()
952 965 if bgprefetch:
953 966 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
954 967 else:
955 968 repo.prefetch(revs, base=base)
956 969 if bgrepack:
957 970 repackmod.backgroundrepack(repo, incremental=True)
958 971 elif bgrepack:
959 972 repackmod.backgroundrepack(repo, incremental=True)
960 973
961 974 return result
962 975
963 976 def exchangepull(orig, repo, remote, *args, **kwargs):
964 977 # Hook into the callstream/getbundle to insert bundle capabilities
965 978 # during a pull.
966 979 def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
967 980 **kwargs):
968 981 if not bundlecaps:
969 982 bundlecaps = set()
970 983 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
971 984 return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
972 985 **kwargs)
973 986
974 987 if util.safehasattr(remote, '_callstream'):
975 988 remote._localrepo = repo
976 989 elif util.safehasattr(remote, 'getbundle'):
977 990 extensions.wrapfunction(remote, 'getbundle', localgetbundle)
978 991
979 992 return orig(repo, remote, *args, **kwargs)
980 993
981 994 def _fileprefetchhook(repo, revs, match):
982 995 if isenabled(repo):
983 996 allfiles = []
984 997 for rev in revs:
985 998 if rev == nodemod.wdirrev or rev is None:
986 999 continue
987 1000 ctx = repo[rev]
988 1001 mf = ctx.manifest()
989 1002 sparsematch = repo.maybesparsematch(ctx.rev())
990 1003 for path in ctx.walk(match):
991 1004 if path.endswith('/'):
992 1005 # Tree manifest that's being excluded as part of narrow
993 1006 continue
994 1007 if (not sparsematch or sparsematch(path)) and path in mf:
995 1008 allfiles.append((path, hex(mf[path])))
996 1009 repo.fileservice.prefetch(allfiles)
997 1010
998 1011 @command('debugremotefilelog', [
999 1012 ('d', 'decompress', None, _('decompress the filelog first')),
1000 1013 ], _('hg debugremotefilelog <path>'), norepo=True)
1001 1014 def debugremotefilelog(ui, path, **opts):
1002 1015 return debugcommands.debugremotefilelog(ui, path, **opts)
1003 1016
1004 1017 @command('verifyremotefilelog', [
1005 1018 ('d', 'decompress', None, _('decompress the filelogs first')),
1006 1019 ], _('hg verifyremotefilelog <directory>'), norepo=True)
1007 1020 def verifyremotefilelog(ui, path, **opts):
1008 1021 return debugcommands.verifyremotefilelog(ui, path, **opts)
1009 1022
1010 1023 @command('debugdatapack', [
1011 1024 ('', 'long', None, _('print the long hashes')),
1012 1025 ('', 'node', '', _('dump the contents of node'), 'NODE'),
1013 1026 ], _('hg debugdatapack <paths>'), norepo=True)
1014 1027 def debugdatapack(ui, *paths, **opts):
1015 1028 return debugcommands.debugdatapack(ui, *paths, **opts)
1016 1029
1017 1030 @command('debughistorypack', [
1018 1031 ], _('hg debughistorypack <path>'), norepo=True)
1019 1032 def debughistorypack(ui, path, **opts):
1020 1033 return debugcommands.debughistorypack(ui, path)
1021 1034
1022 1035 @command('debugkeepset', [
1023 1036 ], _('hg debugkeepset'))
1024 1037 def debugkeepset(ui, repo, **opts):
1025 1038 # The command is used to measure keepset computation time
1026 1039 def keyfn(fname, fnode):
1027 1040 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1028 1041 repackmod.keepset(repo, keyfn)
1029 1042 return
1030 1043
1031 1044 @command('debugwaitonrepack', [
1032 1045 ], _('hg debugwaitonrepack'))
1033 1046 def debugwaitonrepack(ui, repo, **opts):
1034 1047 return debugcommands.debugwaitonrepack(repo)
1035 1048
1036 1049 @command('debugwaitonprefetch', [
1037 1050 ], _('hg debugwaitonprefetch'))
1038 1051 def debugwaitonprefetch(ui, repo, **opts):
1039 1052 return debugcommands.debugwaitonprefetch(repo)
1040 1053
1041 1054 def resolveprefetchopts(ui, opts):
1042 1055 if not opts.get('rev'):
1043 1056 revset = ['.', 'draft()']
1044 1057
1045 1058 prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
1046 1059 if prefetchrevset:
1047 1060 revset.append('(%s)' % prefetchrevset)
1048 1061 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
1049 1062 if bgprefetchrevs:
1050 1063 revset.append('(%s)' % bgprefetchrevs)
1051 1064 revset = '+'.join(revset)
1052 1065
1053 1066 # update a revset with a date limit
1054 1067 revset = revdatelimit(ui, revset)
1055 1068
1056 1069 opts['rev'] = [revset]
1057 1070
1058 1071 if not opts.get('base'):
1059 1072 opts['base'] = None
1060 1073
1061 1074 return opts
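# Worked example: with no prefetch-related configs set and the default
# prefetchdays of 14, resolveprefetchopts(ui, {}) yields
#   opts['rev'] = ['(.+draft()) & date(-14)'] and opts['base'] = None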
1062 1075
1063 1076 @command('prefetch', [
1064 1077 ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
1065 1078 ('', 'repack', False, _('run repack after prefetch')),
1066 1079 ('b', 'base', '', _("rev that is assumed to already be local")),
1067 1080 ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
1068 1081 def prefetch(ui, repo, *pats, **opts):
1069 1082 """prefetch file revisions from the server
1070 1083
1071 1084 Prefetches file revisions for the specified revs and stores them in the
1072 1085 local remotefilelog cache. If no rev is specified, a default revset is
1073 1086 used: the union of '.', draft(), pullprefetch and bgprefetchrevs.
1074 1087 File names or patterns can be used to limit which files are downloaded.
1075 1088
1076 1089 Return 0 on success.
1077 1090 """
1078 1091 opts = pycompat.byteskwargs(opts)
1079 1092 if not isenabled(repo):
1080 1093 raise error.Abort(_("repo is not shallow"))
1081 1094
1082 1095 opts = resolveprefetchopts(ui, opts)
1083 1096 revs = scmutil.revrange(repo, opts.get('rev'))
1084 1097 repo.prefetch(revs, opts.get('base'), pats, opts)
1085 1098
1086 1099 # Run repack in background
1087 1100 if opts.get('repack'):
1088 1101 repackmod.backgroundrepack(repo, incremental=True)
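# Hypothetical command-line usage: prefetch the draft revisions' files under
# src/ and kick off a background incremental repack afterwards:
#   hg prefetch -r 'draft()' --repack src/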
1089 1102
1090 1103 @command('repack', [
1091 1104 ('', 'background', None, _('run in a background process'), None),
1092 1105 ('', 'incremental', None, _('do an incremental repack'), None),
1093 1106 ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
1094 1107 ], _('hg repack [OPTIONS]'))
1095 1108 def repack_(ui, repo, *pats, **opts):
1096 1109 if opts.get(r'background'):
1097 1110 repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
1098 1111 packsonly=opts.get(r'packsonly', False))
1099 1112 return
1100 1113
1101 1114 options = {'packsonly': opts.get(r'packsonly')}
1102 1115
1103 1116 try:
1104 1117 if opts.get(r'incremental'):
1105 1118 repackmod.incrementalrepack(repo, options=options)
1106 1119 else:
1107 1120 repackmod.fullrepack(repo, options=options)
1108 1121 except repackmod.RepackAlreadyRunning as ex:
1109 1122 # Don't propagate the exception if the repack is already in
1110 1123 # progress, since we want the command to exit 0.
1111 1124 repo.ui.warn('%s\n' % ex)