remotefilelog: move most functions in onetimeclientsetup() to top level...
Martin von Zweigbergk
r42459:651f325e default
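The change itself is mechanical: wrapper functions that were defined as
closures inside onetimeclientsetup() move to the module's top level, and the
one-time setup keeps only the extensions.wrapfunction() registration calls.
Wrappers that close over shared local state (addrawrevision and changelogadd
share pendingfilecommits) stay inside the setup function. A minimal sketch of
the pattern, using hypothetical module and function names rather than code
from this commit:

    from mercurial import extensions
    import somemod  # hypothetical wrapped module

    # before: wrapper defined and registered inside the one-time setup
    def onetimesetup(ui):
        def somewrapper(orig, *args, **kwargs):
            return orig(*args, **kwargs)
        extensions.wrapfunction(somemod, 'somefunc', somewrapper)

    # after: wrapper hoisted to top level; the setup only registers it
    def somewrapper(orig, *args, **kwargs):
        return orig(*args, **kwargs)

    def onetimesetup(ui):
        extensions.wrapfunction(somemod, 'somefunc', somewrapper)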
@@ -1,1111 +1,1124 @@
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent

``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
minimum number of pack files > gencountlimit.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, list ['100MB', '1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
[1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, minimum number of pack files > gencountlimit.

``remotefilelog.history.generations`` list for specifying the lower bound of
each generation of the history pack files. For example, list ['100MB', '1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
[1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning`` message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

"""
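# Illustrative only: a minimal client configuration exercising the options
# documented above might look like the hgrc below. The repo name, cache path,
# and prefetch revset are hypothetical examples, not extension defaults.
#
#   [extensions]
#   remotefilelog =
#
#   [remotefilelog]
#   reponame = myrepo
#   cachepath = /var/cache/hgcache
#   pullprefetch = bookmark() + parents()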
from __future__ import absolute_import

import os
import time
import traceback

from mercurial.node import hex
from mercurial.i18n import _
from mercurial import (
    changegroup,
    changelog,
    cmdutil,
    commands,
    configitems,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match,
    merge,
    node as nodemod,
    patch,
    pycompat,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    streamclone,
    util,
)
from . import (
    constants,
    debugcommands,
    fileserverclient,
    remotefilectx,
    remotefilelog,
    remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem('remotefilelog', 'debug', default=False)

configitem('remotefilelog', 'reponame', default='')
configitem('remotefilelog', 'cachepath', default=None)
configitem('remotefilelog', 'cachegroup', default=None)
configitem('remotefilelog', 'cacheprocess', default=None)
configitem('remotefilelog', 'cacheprocess.includepath', default=None)
configitem("remotefilelog", "cachelimit", default="1000 GB")

configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
           alias=[('remotefilelog', 'fallbackrepo')])

configitem('remotefilelog', 'validatecachelog', default=None)
configitem('remotefilelog', 'validatecache', default='on')
configitem('remotefilelog', 'server', default=None)
configitem('remotefilelog', 'servercachepath', default=None)
configitem("remotefilelog", "serverexpiration", default=30)
configitem('remotefilelog', 'backgroundrepack', default=False)
configitem('remotefilelog', 'bgprefetchrevs', default=None)
configitem('remotefilelog', 'pullprefetch', default=None)
configitem('remotefilelog', 'backgroundprefetch', default=False)
configitem('remotefilelog', 'prefetchdelay', default=120)
configitem('remotefilelog', 'prefetchdays', default=14)

configitem('remotefilelog', 'getfilesstep', default=10000)
configitem('remotefilelog', 'getfilestype', default='optimistic')
configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
configitem('remotefilelog', 'fetchwarning', default='')

configitem('remotefilelog', 'includepattern', default=None)
configitem('remotefilelog', 'excludepattern', default=None)

configitem('remotefilelog', 'gcrepack', default=False)
configitem('remotefilelog', 'repackonhggc', default=False)
configitem('repack', 'chainorphansbysize', default=True)

configitem('packs', 'maxpacksize', default=0)
configitem('packs', 'maxchainlen', default=1000)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem('remotefilelog', 'nodettl', default=_defaultlimit)

configitem('remotefilelog', 'data.gencountlimit', default=2)
configitem('remotefilelog', 'data.generations',
           default=['1GB', '100MB', '1MB'])
configitem('remotefilelog', 'data.maxrepackpacks', default=50)
configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
configitem('remotefilelog', 'data.repacksizelimit', default='100MB')

configitem('remotefilelog', 'history.gencountlimit', default=2)
configitem('remotefilelog', 'history.generations', default=['100MB'])
configitem('remotefilelog', 'history.maxrepackpacks', default=50)
configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
configitem('remotefilelog', 'history.repacksizelimit', default='100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled

def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
    entry[1].append(('', 'shallow', None,
                     _("create a shallow clone which uses remote file "
                       "history")))

    extensions.wrapcommand(commands.table, 'debugindex',
        debugcommands.debugindex)
    extensions.wrapcommand(commands.table, 'debugindexdot',
        debugcommands.debugindexdot)
    extensions.wrapcommand(commands.table, 'log', log)
    extensions.wrapcommand(commands.table, 'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if (isenabled(repo) and opts.get(r'all')):
            raise error.Abort(_("--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)
    extensions.wrapcommand(commands.table, "manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find('lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod
    extensions.afterloaded('lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)

def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get(r'shallow'):
        repos = []
        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)
        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts[r'includepattern'] = '\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    return orig()
            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)
        extensions.wrapfunction(
            streamclone, 'maybeperformlegacystreamclone', stream_wrap)

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if 'v2' in pullop.remotebundle2caps.get('stream', []):
                pullop.remotebundle2caps['stream'] = [
                    c for c in pullop.remotebundle2caps['stream']
                    if c != 'v2']
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements
        extensions.wrapfunction(
            streamclone, 'canperformstreamclone', canperformstreamclone)

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get(r'shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()

def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen

def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
    ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool('remotefilelog', 'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError("Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)

def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)

+def storewrapper(orig, requirements, path, vfstype):
+    s = orig(requirements, path, vfstype)
+    if constants.SHALLOWREPO_REQUIREMENT in requirements:
+        s = shallowstore.wrapstore(s)
+
+    return s
+
+# prefetch files before update
+def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
+    if isenabled(repo):
+        manifest = mctx.manifest()
+        files = []
+        for f, args, msg in actions['g']:
+            files.append((f, hex(manifest[f])))
+        # batch fetch the needed files from the server
+        repo.fileservice.prefetch(files)
+    return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
+
+# Prefetch merge checkunknownfiles
+def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
+                      *args, **kwargs):
+    if isenabled(repo):
+        files = []
+        sparsematch = repo.maybesparsematch(mctx.rev())
+        for f, (m, actionargs, msg) in actions.iteritems():
+            if sparsematch and not sparsematch(f):
+                continue
+            if m in ('c', 'dc', 'cm'):
+                files.append((f, hex(mctx.filenode(f))))
+            elif m == 'dg':
+                f2 = actionargs[0]
+                files.append((f2, hex(mctx.filenode(f2))))
+        # batch fetch the needed files from the server
+        repo.fileservice.prefetch(files)
+    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
+
+# Prefetch files before status attempts to look at their size and contents
+def checklookup(orig, self, files):
+    repo = self._repo
+    if isenabled(repo):
+        prefetchfiles = []
+        for parent in self._parents:
+            for f in files:
+                if f in parent:
+                    prefetchfiles.append((f, hex(parent.filenode(f))))
+        # batch fetch the needed files from the server
+        repo.fileservice.prefetch(prefetchfiles)
+    return orig(self, files)
+
+# Prefetch the logic that compares added and removed files for renames
+def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
+    if isenabled(repo):
+        files = []
+        pmf = repo['.'].manifest()
+        for f in removed:
+            if f in pmf:
+                files.append((f, hex(pmf[f])))
+        # batch fetch the needed files from the server
+        repo.fileservice.prefetch(files)
+    return orig(repo, matcher, added, removed, *args, **kwargs)
+
+# prefetch files before pathcopies check
+def computeforwardmissing(orig, a, b, match=None):
+    missing = orig(a, b, match=match)
+    repo = a._repo
+    if isenabled(repo):
+        mb = b.manifest()
+
+        files = []
+        sparsematch = repo.maybesparsematch(b.rev())
+        if sparsematch:
+            sparsemissing = set()
+            for f in missing:
+                if sparsematch(f):
+                    files.append((f, hex(mb[f])))
+                    sparsemissing.add(f)
+            missing = sparsemissing
+
+        # batch fetch the needed files from the server
+        repo.fileservice.prefetch(files)
+    return missing
+
+# close cache miss server connection after the command has finished
+def runcommand(orig, lui, repo, *args, **kwargs):
+    fileservice = None
+    # repo can be None when running in chg:
+    # - at startup, reposetup was called because serve is not norepo
+    # - a norepo command like "help" is called
+    if repo and isenabled(repo):
+        fileservice = repo.fileservice
+    try:
+        return orig(lui, repo, *args, **kwargs)
+    finally:
+        if fileservice:
+            fileservice.close()
+
+# prevent strip from stripping remotefilelogs
+def _collectbrokencsets(orig, repo, files, striprev):
+    if isenabled(repo):
+        files = list([f for f in files if not repo.shallowmatch(f)])
+    return orig(repo, files, striprev)
+
+# changectx wrappers
+def filectx(orig, self, path, fileid=None, filelog=None):
+    if fileid is None:
+        fileid = self.filenode(path)
+    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
+        return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
+                                           changectx=self, filelog=filelog)
+    return orig(self, path, fileid=fileid, filelog=filelog)
+
+def workingfilectx(orig, self, path, filelog=None):
+    if (isenabled(self._repo) and self._repo.shallowmatch(path)):
+        return remotefilectx.remoteworkingfilectx(self._repo, path,
+                                                  workingctx=self,
+                                                  filelog=filelog)
+    return orig(self, path, filelog=filelog)
+
+# prefetch required revisions before a diff
+def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
+            copy, getfilectx, *args, **kwargs):
+    if isenabled(repo):
+        prefetch = []
+        mf1 = ctx1.manifest()
+        for fname in modified + added + removed:
+            if fname in mf1:
+                fnode = getfilectx(fname, ctx1).filenode()
+                # fnode can be None if it's an edited working ctx file
+                if fnode:
+                    prefetch.append((fname, hex(fnode)))
+            if fname not in removed:
+                fnode = getfilectx(fname, ctx2).filenode()
+                if fnode:
+                    prefetch.append((fname, hex(fnode)))
+
+        repo.fileservice.prefetch(prefetch)
+
+    return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
+                getfilectx, *args, **kwargs)
+
+# Prevent verify from processing files
+# a stub for mercurial.hg.verify()
+def _verify(orig, repo, level=None):
+    lock = repo.lock()
+    try:
+        return shallowverifier.shallowverifier(repo).verify()
+    finally:
+        lock.release()
+
+
clientonetime = False
def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(changegroup, '_addchangegroupfiles',
                            shallowbundle.addchangegroupfiles)
    extensions.wrapfunction(
        changegroup, 'makechangegroup', shallowbundle.makechangegroup)

-    def storewrapper(orig, requirements, path, vfstype):
-        s = orig(requirements, path, vfstype)
-        if constants.SHALLOWREPO_REQUIREMENT in requirements:
-            s = shallowstore.wrapstore(s)
-
-        return s
    extensions.wrapfunction(localrepo, 'makestore', storewrapper)

    extensions.wrapfunction(exchange, 'pull', exchangepull)

-    # prefetch files before update
-    def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
-        if isenabled(repo):
-            manifest = mctx.manifest()
-            files = []
-            for f, args, msg in actions['g']:
-                files.append((f, hex(manifest[f])))
-            # batch fetch the needed files from the server
-            repo.fileservice.prefetch(files)
-        return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
    extensions.wrapfunction(merge, 'applyupdates', applyupdates)

-    # Prefetch merge checkunknownfiles
-    def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
-                          *args, **kwargs):
-        if isenabled(repo):
-            files = []
-            sparsematch = repo.maybesparsematch(mctx.rev())
-            for f, (m, actionargs, msg) in actions.iteritems():
-                if sparsematch and not sparsematch(f):
-                    continue
-                if m in ('c', 'dc', 'cm'):
-                    files.append((f, hex(mctx.filenode(f))))
-                elif m == 'dg':
-                    f2 = actionargs[0]
-                    files.append((f2, hex(mctx.filenode(f2))))
-            # batch fetch the needed files from the server
-            repo.fileservice.prefetch(files)
-        return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)

-    # Prefetch files before status attempts to look at their size and contents
-    def checklookup(orig, self, files):
-        repo = self._repo
-        if isenabled(repo):
-            prefetchfiles = []
-            for parent in self._parents:
-                for f in files:
-                    if f in parent:
-                        prefetchfiles.append((f, hex(parent.filenode(f))))
-            # batch fetch the needed files from the server
-            repo.fileservice.prefetch(prefetchfiles)
-        return orig(self, files)
    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)

-    # Prefetch the logic that compares added and removed files for renames
-    def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
-        if isenabled(repo):
-            files = []
-            pmf = repo['.'].manifest()
-            for f in removed:
-                if f in pmf:
-                    files.append((f, hex(pmf[f])))
-            # batch fetch the needed files from the server
-            repo.fileservice.prefetch(files)
-        return orig(repo, matcher, added, removed, *args, **kwargs)
    extensions.wrapfunction(scmutil, '_findrenames', findrenames)

-    # prefetch files before pathcopies check
-    def computeforwardmissing(orig, a, b, match=None):
-        missing = orig(a, b, match=match)
-        repo = a._repo
-        if isenabled(repo):
-            mb = b.manifest()
-
-            files = []
-            sparsematch = repo.maybesparsematch(b.rev())
-            if sparsematch:
-                sparsemissing = set()
-                for f in missing:
-                    if sparsematch(f):
-                        files.append((f, hex(mb[f])))
-                        sparsemissing.add(f)
-                missing = sparsemissing
-
-            # batch fetch the needed files from the server
-            repo.fileservice.prefetch(files)
-        return missing
    extensions.wrapfunction(copies, '_computeforwardmissing',
                            computeforwardmissing)

-    # close cache miss server connection after the command has finished
-    def runcommand(orig, lui, repo, *args, **kwargs):
-        fileservice = None
-        # repo can be None when running in chg:
-        # - at startup, reposetup was called because serve is not norepo
-        # - a norepo command like "help" is called
-        if repo and isenabled(repo):
-            fileservice = repo.fileservice
-        try:
-            return orig(lui, repo, *args, **kwargs)
-        finally:
-            if fileservice:
-                fileservice.close()
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)

    # disappointing hacks below
    scmutil.getrenamedfn = getrenamedfn
    extensions.wrapfunction(revset, 'filelog', filelogrevset)
    revset.symbols['filelog'] = revset.filelog
    extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)

-    # prevent strip from stripping remotefilelogs
-    def _collectbrokencsets(orig, repo, files, striprev):
-        if isenabled(repo):
-            files = list([f for f in files if not repo.shallowmatch(f)])
-        return orig(repo, files, striprev)
    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []
    def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
                       flags, cachedelta=None, _metatuple=None):
        if isinstance(link, int):
            pendingfilecommits.append(
                (self, rawtext, transaction, link, p1, p2, node, flags,
                 cachedelta, _metatuple))
            return node
        else:
            return orig(self, rawtext, transaction, link, p1, p2, node, flags,
                        cachedelta, _metatuple=_metatuple)
    extensions.wrapfunction(
        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)

    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        'pending multiple integer revisions are not supported')
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    'pending multiple integer revisions are not supported')
        del pendingfilecommits[:]
        return node
    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)

-    # changectx wrappers
-    def filectx(orig, self, path, fileid=None, filelog=None):
-        if fileid is None:
-            fileid = self.filenode(path)
-        if (isenabled(self._repo) and self._repo.shallowmatch(path)):
-            return remotefilectx.remotefilectx(self._repo, path,
-                fileid=fileid, changectx=self, filelog=filelog)
-        return orig(self, path, fileid=fileid, filelog=filelog)
    extensions.wrapfunction(context.changectx, 'filectx', filectx)

-    def workingfilectx(orig, self, path, filelog=None):
-        if (isenabled(self._repo) and self._repo.shallowmatch(path)):
-            return remotefilectx.remoteworkingfilectx(self._repo,
-                path, workingctx=self, filelog=filelog)
-        return orig(self, path, filelog=filelog)
    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)

-    # prefetch required revisions before a diff
-    def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
-                copy, getfilectx, *args, **kwargs):
-        if isenabled(repo):
-            prefetch = []
-            mf1 = ctx1.manifest()
-            for fname in modified + added + removed:
-                if fname in mf1:
-                    fnode = getfilectx(fname, ctx1).filenode()
-                    # fnode can be None if it's a edited working ctx file
-                    if fnode:
-                        prefetch.append((fname, hex(fnode)))
-                if fname not in removed:
-                    fnode = getfilectx(fname, ctx2).filenode()
-                    if fnode:
-                        prefetch.append((fname, hex(fnode)))
-
-            repo.fileservice.prefetch(prefetch)
-
-        return orig(repo, revs, ctx1, ctx2, modified, added, removed,
-                    copy, getfilectx, *args, **kwargs)
    extensions.wrapfunction(patch, 'trydiff', trydiff)

-    # Prevent verify from processing files
-    # a stub for mercurial.hg.verify()
-    def _verify(orig, repo, level=None):
-        lock = repo.lock()
-        try:
-            return shallowverifier.shallowverifier(repo).verify()
-        finally:
-            lock.release()
-
    extensions.wrapfunction(hg, 'verify', _verify)

    scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)

def getrenamedfn(repo, endrev=None):
    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed

def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError("Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo['.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % filename)
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted

def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])

@command('gc', [], _('hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get('PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)

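# Illustrative usage of the 'hg gc' command defined above (the repository
# paths are hypothetical): with no arguments it collects the caches found
# via the current working directory; explicit repository paths may also be
# given, per the 'hg gc [REPO...]' synopsis:
#
#   $ hg gc
#   $ hg gc /path/to/repo1 /path/to/repo2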
769 def gcclient(ui, cachepath):
782 def gcclient(ui, cachepath):
770 # get list of repos that use this cache
783 # get list of repos that use this cache
771 repospath = os.path.join(cachepath, 'repos')
784 repospath = os.path.join(cachepath, 'repos')
772 if not os.path.exists(repospath):
785 if not os.path.exists(repospath):
773 ui.warn(_("no known cache at %s\n") % cachepath)
786 ui.warn(_("no known cache at %s\n") % cachepath)
774 return
787 return
775
788
776 reposfile = open(repospath, 'rb')
789 reposfile = open(repospath, 'rb')
777 repos = {r[:-1] for r in reposfile.readlines()}
790 repos = {r[:-1] for r in reposfile.readlines()}
778 reposfile.close()
791 reposfile.close()
779
792
780 # build list of useful files
793 # build list of useful files
781 validrepos = []
794 validrepos = []
782 keepkeys = set()
795 keepkeys = set()
783
796
784 sharedcache = None
797 sharedcache = None
785 filesrepacked = False
798 filesrepacked = False
786
799
787 count = 0
800 count = 0
788 progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
801 progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
789 total=len(repos))
802 total=len(repos))
790 for path in repos:
803 for path in repos:
791 progress.update(count)
804 progress.update(count)
792 count += 1
805 count += 1
793 try:
806 try:
794 path = ui.expandpath(os.path.normpath(path))
807 path = ui.expandpath(os.path.normpath(path))
795 except TypeError as e:
808 except TypeError as e:
796 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
809 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
797 traceback.print_exc()
810 traceback.print_exc()
798 continue
811 continue
799 try:
812 try:
800 peer = hg.peer(ui, {}, path)
813 peer = hg.peer(ui, {}, path)
801 repo = peer._repo
814 repo = peer._repo
802 except error.RepoError:
815 except error.RepoError:
803 continue
816 continue
804
817
805 validrepos.append(path)
818 validrepos.append(path)
806
819
807 # Protect against any repo or config changes that have happened since
820 # Protect against any repo or config changes that have happened since
808 # this repo was added to the repos file. We'd rather this loop succeed
821 # this repo was added to the repos file. We'd rather this loop succeed
809 # and too much be deleted, than the loop fail and nothing gets deleted.
822 # and too much be deleted, than the loop fail and nothing gets deleted.
810 if not isenabled(repo):
823 if not isenabled(repo):
811 continue
824 continue
812
825
813 if not util.safehasattr(repo, 'name'):
826 if not util.safehasattr(repo, 'name'):
814 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
827 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
815 continue
828 continue
816
829
817 # If garbage collection on repack and repack on hg gc are enabled
830 # If garbage collection on repack and repack on hg gc are enabled
818 # then loose files are repacked and garbage collected.
831 # then loose files are repacked and garbage collected.
819 # Otherwise regular garbage collection is performed.
832 # Otherwise regular garbage collection is performed.
820 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
833 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
821 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
834 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
822 if repackonhggc and gcrepack:
835 if repackonhggc and gcrepack:
823 try:
836 try:
824 repackmod.incrementalrepack(repo)
837 repackmod.incrementalrepack(repo)
825 filesrepacked = True
838 filesrepacked = True
826 continue
839 continue
827 except (IOError, repackmod.RepackAlreadyRunning):
840 except (IOError, repackmod.RepackAlreadyRunning):
828 # If repack cannot be performed due to not enough disk space
841 # If repack cannot be performed due to not enough disk space
829 # continue doing garbage collection of loose files w/o repack
842 # continue doing garbage collection of loose files w/o repack
830 pass
843 pass
831
844
832 reponame = repo.name
845 reponame = repo.name
833 if not sharedcache:
846 if not sharedcache:
834 sharedcache = repo.sharedstore
847 sharedcache = repo.sharedstore
835
848
836 # Compute a keepset which is not garbage collected
849 # Compute a keepset which is not garbage collected
837 def keyfn(fname, fnode):
850 def keyfn(fname, fnode):
838 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
851 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
839 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
852 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
840
853
841 progress.complete()
854 progress.complete()
842
855
843 # write list of valid repos back
856 # write list of valid repos back
844 oldumask = os.umask(0o002)
857 oldumask = os.umask(0o002)
845 try:
858 try:
846 reposfile = open(repospath, 'wb')
859 reposfile = open(repospath, 'wb')
847 reposfile.writelines([("%s\n" % r) for r in validrepos])
860 reposfile.writelines([("%s\n" % r) for r in validrepos])
848 reposfile.close()
861 reposfile.close()
849 finally:
862 finally:
850 os.umask(oldumask)
863 os.umask(oldumask)
851
864
852 # prune cache
865 # prune cache
853 if sharedcache is not None:
866 if sharedcache is not None:
854 sharedcache.gc(keepkeys)
867 sharedcache.gc(keepkeys)
855 elif not filesrepacked:
868 elif not filesrepacked:
856 ui.warn(_("warning: no valid repos in repofile\n"))
869 ui.warn(_("warning: no valid repos in repofile\n"))
857
870
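# A minimal sketch (illustrative, not part of the extension) of the contract
# between keepset() and sharedcache.gc() above: keepset() returns the cache
# keys that must survive, and a gc pass may drop any cached key outside it.
def _example_gcpass(cachedkeys, keepkeys):
    # keys a gc pass would be allowed to delete, under that assumption
    return {key for key in cachedkeys if key not in keepkeys}
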
def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get(r'follow')
    revs = opts.get(r'rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts[r'removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(_("warning: file log can be slow on large repos - " +
                          "use -f to speed it up\n"))

    return orig(ui, repo, *pats, **opts)

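# Usage note (illustrative): a plain 'hg log somefile' can hit the slow path
# and print the warning above, while 'hg log -f somefile' follows the file's
# filelog history and is typically much faster.
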
def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is 14 days. If 'prefetchdays' is zero or
    negative, no date restriction is applied.
    """
    days = ui.configint('remotefilelog', 'prefetchdays')
    if days > 0:
        revset = '(%s) & date(-%s)' % (revset, days)
    return revset

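# Illustrative example, assuming the default prefetchdays of 14:
#   revdatelimit(ui, 'draft()')  ->  '(draft()) & date(-14)'
# i.e. only draft changesets from the last two weeks remain in the revset.
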
def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. The default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
    fname = repo.vfs.join('lastprefetch')

    ready = False
    with open(fname, 'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

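# A minimal standalone sketch (illustrative) of the mtime-based throttle used
# above: the marker file's mtime records the last prefetch, and touching the
# file re-arms the timer. Relies on the os and time imports at the top of
# this file.
def _example_throttle(markerpath, timeout):
    with open(markerpath, 'a'):  # create the marker if it doesn't exist yet
        if time.time() - os.path.getmtime(markerpath) > timeout:
            os.utime(markerpath, None)  # record this run
            return True
    return False
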
def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Does a background repack if the backgroundrepack
    flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
    # update the revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon():
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)

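# Illustrative configuration for this hook (the option names are the real
# remotefilelog settings read above; the values are only examples):
#   [remotefilelog]
#   bgprefetchrevs = draft()
#   backgroundrepack = True
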
def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
        bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
        bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')

        if prefetchrevset:
            ui.status(_("prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo['.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result

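# Illustrative configuration (pullprefetch is the option read above; the
# value is only an example): after each pull, prefetch file contents for the
# working copy parent and all draft changesets.
#   [remotefilelog]
#   pullprefetch = . + draft()
#   backgroundprefetch = True
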
def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
                       **kwargs):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
                    **kwargs)

    if util.safehasattr(remote, '_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        extensions.wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

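# A minimal sketch of the wrapfunction pattern used above (illustrative):
# the wrapper receives the original callable as its first argument and may
# adjust arguments before delegating.
#
#   def wrapper(orig, *args, **kwargs):
#       ...                          # e.g. add bundle capabilities
#       return orig(*args, **kwargs)
#   extensions.wrapfunction(obj, 'methodname', wrapper)
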
def _fileprefetchhook(repo, revs, match):
    if isenabled(repo):
        allfiles = []
        for rev in revs:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if path.endswith('/'):
                    # Tree manifest that's being excluded as part of narrow
                    continue
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)

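# Illustrative shape of the request built above: prefetch() receives a list
# of (path, hex filenode) pairs, e.g.
#   [('foo/bar.txt', 'a3f5...'), ('baz.py', '9c01...')]
# (the hashes are truncated placeholders, not real nodes).
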
@command('debugremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelog first')),
    ], _('hg debugremotefilelog <path>'), norepo=True)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)

@command('verifyremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelogs first')),
    ], _('hg verifyremotefilelog <directory>'), norepo=True)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)

@command('debugdatapack', [
    ('', 'long', None, _('print the long hashes')),
    ('', 'node', '', _('dump the contents of node'), 'NODE'),
    ], _('hg debugdatapack <paths>'), norepo=True)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)

@command('debughistorypack', [
    ], _('hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)

@command('debugkeepset', [
    ], _('hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
    repackmod.keepset(repo, keyfn)
    return

@command('debugwaitonrepack', [
    ], _('hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)

@command('debugwaitonprefetch', [
    ], _('hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)

def resolveprefetchopts(ui, opts):
    if not opts.get('rev'):
        revset = ['.', 'draft()']

        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
        if prefetchrevset:
            revset.append('(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append('(%s)' % bgprefetchrevs)
        revset = '+'.join(revset)

        # update the revset with a date limit
        revset = revdatelimit(ui, revset)

        opts['rev'] = [revset]

    if not opts.get('base'):
        opts['base'] = None

    return opts

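# Illustrative result (assuming pullprefetch and bgprefetchrevs are unset and
# prefetchdays has its default of 14): with no --rev given, the prefetch
# revset resolves to
#   (.+draft()) & date(-14)
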
@command('prefetch', [
    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
    ('', 'repack', False, _('run repack after prefetch')),
    ('b', 'base', '', _("rev that is assumed to already be local")),
    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft(), pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_("repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get('rev'))
    repo.prefetch(revs, opts.get('base'), pats, opts)

    # Run repack in the background
    if opts.get('repack'):
        repackmod.backgroundrepack(repo, incremental=True)

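# Example invocations (illustrative):
#   hg prefetch                        # fetch files for the default revset
#   hg prefetch -r 'draft()' --repack  # fetch drafts, then repack
#   hg prefetch -r . 'glob:src/**'     # limit the fetch to files under src/
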
@command('repack', [
    ('', 'background', None, _('run in a background process'), None),
    ('', 'incremental', None, _('do an incremental repack'), None),
    ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
    ], _('hg repack [OPTIONS]'))
def repack_(ui, repo, *pats, **opts):
    if opts.get(r'background'):
        repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
                                   packsonly=opts.get(r'packsonly', False))
        return

    options = {'packsonly': opts.get(r'packsonly')}

    try:
        if opts.get(r'incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if a repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn('%s\n' % ex)
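# Example invocations (illustrative):
#   hg repack                              # full repack in the foreground
#   hg repack --incremental --background   # cheaper repack, detached process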