remotefilelog: check if RFL is enabled in getrenamedfn() override...
Martin von Zweigbergk
r42699:f93762f2 default
@@ -1,1110 +1,1113 @@
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent

``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
minimum number of pack files > gencountlimit.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, list ['100MB','1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
[1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, minimum number of pack files > gencountlimit.

``remotefilelog.history.generations`` list for specifying the lower bound of
each generation of the history pack files. For example, list ['100MB', '1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
[1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning``: message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

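An illustrative client-side configuration, using the options documented
above (values are examples, not defaults)::

  [extensions]
  remotefilelog =

  [remotefilelog]
  cachepath = /var/cache/hg-filecache
  reponame = myrepo
  pullprefetch = parents(tip)
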
126 """
126 """
127 from __future__ import absolute_import
127 from __future__ import absolute_import
128
128
129 import os
129 import os
130 import time
130 import time
131 import traceback
131 import traceback
132
132
133 from mercurial.node import hex
133 from mercurial.node import hex
134 from mercurial.i18n import _
134 from mercurial.i18n import _
135 from mercurial import (
135 from mercurial import (
136 changegroup,
136 changegroup,
137 changelog,
137 changelog,
138 cmdutil,
138 cmdutil,
139 commands,
139 commands,
140 configitems,
140 configitems,
141 context,
141 context,
142 copies,
142 copies,
143 debugcommands as hgdebugcommands,
143 debugcommands as hgdebugcommands,
144 dispatch,
144 dispatch,
145 error,
145 error,
146 exchange,
146 exchange,
147 extensions,
147 extensions,
148 hg,
148 hg,
149 localrepo,
149 localrepo,
150 match,
150 match,
151 merge,
151 merge,
152 node as nodemod,
152 node as nodemod,
153 patch,
153 patch,
154 pycompat,
154 pycompat,
155 registrar,
155 registrar,
156 repair,
156 repair,
157 repoview,
157 repoview,
158 revset,
158 revset,
159 scmutil,
159 scmutil,
160 smartset,
160 smartset,
161 streamclone,
161 streamclone,
162 util,
162 util,
163 )
163 )
164 from . import (
164 from . import (
165 constants,
165 constants,
166 debugcommands,
166 debugcommands,
167 fileserverclient,
167 fileserverclient,
168 remotefilectx,
168 remotefilectx,
169 remotefilelog,
169 remotefilelog,
170 remotefilelogserver,
170 remotefilelogserver,
171 repack as repackmod,
171 repack as repackmod,
172 shallowbundle,
172 shallowbundle,
173 shallowrepo,
173 shallowrepo,
174 shallowstore,
174 shallowstore,
175 shallowutil,
175 shallowutil,
176 shallowverifier,
176 shallowverifier,
177 )
177 )
178
178
179 # ensures debug commands are registered
179 # ensures debug commands are registered
180 hgdebugcommands.command
180 hgdebugcommands.command
181
181
182 cmdtable = {}
182 cmdtable = {}
183 command = registrar.command(cmdtable)
183 command = registrar.command(cmdtable)
184
184
185 configtable = {}
185 configtable = {}
186 configitem = registrar.configitem(configtable)
186 configitem = registrar.configitem(configtable)
187
187
188 configitem('remotefilelog', 'debug', default=False)
188 configitem('remotefilelog', 'debug', default=False)
189
189
190 configitem('remotefilelog', 'reponame', default='')
190 configitem('remotefilelog', 'reponame', default='')
191 configitem('remotefilelog', 'cachepath', default=None)
191 configitem('remotefilelog', 'cachepath', default=None)
192 configitem('remotefilelog', 'cachegroup', default=None)
192 configitem('remotefilelog', 'cachegroup', default=None)
193 configitem('remotefilelog', 'cacheprocess', default=None)
193 configitem('remotefilelog', 'cacheprocess', default=None)
194 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
194 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
195 configitem("remotefilelog", "cachelimit", default="1000 GB")
195 configitem("remotefilelog", "cachelimit", default="1000 GB")
196
196
197 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
197 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
198 alias=[('remotefilelog', 'fallbackrepo')])
198 alias=[('remotefilelog', 'fallbackrepo')])
199
199
200 configitem('remotefilelog', 'validatecachelog', default=None)
200 configitem('remotefilelog', 'validatecachelog', default=None)
201 configitem('remotefilelog', 'validatecache', default='on')
201 configitem('remotefilelog', 'validatecache', default='on')
202 configitem('remotefilelog', 'server', default=None)
202 configitem('remotefilelog', 'server', default=None)
203 configitem('remotefilelog', 'servercachepath', default=None)
203 configitem('remotefilelog', 'servercachepath', default=None)
204 configitem("remotefilelog", "serverexpiration", default=30)
204 configitem("remotefilelog", "serverexpiration", default=30)
205 configitem('remotefilelog', 'backgroundrepack', default=False)
205 configitem('remotefilelog', 'backgroundrepack', default=False)
206 configitem('remotefilelog', 'bgprefetchrevs', default=None)
206 configitem('remotefilelog', 'bgprefetchrevs', default=None)
207 configitem('remotefilelog', 'pullprefetch', default=None)
207 configitem('remotefilelog', 'pullprefetch', default=None)
208 configitem('remotefilelog', 'backgroundprefetch', default=False)
208 configitem('remotefilelog', 'backgroundprefetch', default=False)
209 configitem('remotefilelog', 'prefetchdelay', default=120)
209 configitem('remotefilelog', 'prefetchdelay', default=120)
210 configitem('remotefilelog', 'prefetchdays', default=14)
210 configitem('remotefilelog', 'prefetchdays', default=14)
211
211
212 configitem('remotefilelog', 'getfilesstep', default=10000)
212 configitem('remotefilelog', 'getfilesstep', default=10000)
213 configitem('remotefilelog', 'getfilestype', default='optimistic')
213 configitem('remotefilelog', 'getfilestype', default='optimistic')
214 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
214 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
215 configitem('remotefilelog', 'fetchwarning', default='')
215 configitem('remotefilelog', 'fetchwarning', default='')
216
216
217 configitem('remotefilelog', 'includepattern', default=None)
217 configitem('remotefilelog', 'includepattern', default=None)
218 configitem('remotefilelog', 'excludepattern', default=None)
218 configitem('remotefilelog', 'excludepattern', default=None)
219
219
220 configitem('remotefilelog', 'gcrepack', default=False)
220 configitem('remotefilelog', 'gcrepack', default=False)
221 configitem('remotefilelog', 'repackonhggc', default=False)
221 configitem('remotefilelog', 'repackonhggc', default=False)
222 configitem('repack', 'chainorphansbysize', default=True)
222 configitem('repack', 'chainorphansbysize', default=True)
223
223
224 configitem('packs', 'maxpacksize', default=0)
224 configitem('packs', 'maxpacksize', default=0)
225 configitem('packs', 'maxchainlen', default=1000)
225 configitem('packs', 'maxchainlen', default=1000)
226
226
227 # default TTL limit is 30 days
227 # default TTL limit is 30 days
228 _defaultlimit = 60 * 60 * 24 * 30
228 _defaultlimit = 60 * 60 * 24 * 30
229 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
229 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
230
230
231 configitem('remotefilelog', 'data.gencountlimit', default=2),
231 configitem('remotefilelog', 'data.gencountlimit', default=2),
232 configitem('remotefilelog', 'data.generations',
232 configitem('remotefilelog', 'data.generations',
233 default=['1GB', '100MB', '1MB'])
233 default=['1GB', '100MB', '1MB'])
234 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
234 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
235 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
235 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
236 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
236 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
237
237
238 configitem('remotefilelog', 'history.gencountlimit', default=2),
238 configitem('remotefilelog', 'history.gencountlimit', default=2),
239 configitem('remotefilelog', 'history.generations', default=['100MB'])
239 configitem('remotefilelog', 'history.generations', default=['100MB'])
240 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
240 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
241 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
241 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
242 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
242 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
243
243
244 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
244 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
245 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
245 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
246 # be specifying the version(s) of Mercurial they are tested with, or
246 # be specifying the version(s) of Mercurial they are tested with, or
247 # leave the attribute unspecified.
247 # leave the attribute unspecified.
248 testedwith = 'ships-with-hg-core'
248 testedwith = 'ships-with-hg-core'
249
249
250 repoclass = localrepo.localrepository
250 repoclass = localrepo.localrepository
251 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
251 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
252
252
253 isenabled = shallowutil.isenabled
253 isenabled = shallowutil.isenabled
254
254
255 def uisetup(ui):
255 def uisetup(ui):
256 """Wraps user facing Mercurial commands to swap them out with shallow
256 """Wraps user facing Mercurial commands to swap them out with shallow
257 versions.
257 versions.
258 """
258 """
259 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
259 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
260
260
261 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
261 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
262 entry[1].append(('', 'shallow', None,
262 entry[1].append(('', 'shallow', None,
263 _("create a shallow clone which uses remote file "
263 _("create a shallow clone which uses remote file "
264 "history")))
264 "history")))
265
265
266 extensions.wrapcommand(commands.table, 'debugindex',
266 extensions.wrapcommand(commands.table, 'debugindex',
267 debugcommands.debugindex)
267 debugcommands.debugindex)
268 extensions.wrapcommand(commands.table, 'debugindexdot',
268 extensions.wrapcommand(commands.table, 'debugindexdot',
269 debugcommands.debugindexdot)
269 debugcommands.debugindexdot)
270 extensions.wrapcommand(commands.table, 'log', log)
270 extensions.wrapcommand(commands.table, 'log', log)
271 extensions.wrapcommand(commands.table, 'pull', pull)
271 extensions.wrapcommand(commands.table, 'pull', pull)
272
272
273 # Prevent 'hg manifest --all'
273 # Prevent 'hg manifest --all'
274 def _manifest(orig, ui, repo, *args, **opts):
274 def _manifest(orig, ui, repo, *args, **opts):
275 if (isenabled(repo) and opts.get(r'all')):
275 if (isenabled(repo) and opts.get(r'all')):
276 raise error.Abort(_("--all is not supported in a shallow repo"))
276 raise error.Abort(_("--all is not supported in a shallow repo"))
277
277
278 return orig(ui, repo, *args, **opts)
278 return orig(ui, repo, *args, **opts)
279 extensions.wrapcommand(commands.table, "manifest", _manifest)
279 extensions.wrapcommand(commands.table, "manifest", _manifest)
280
280
281 # Wrap remotefilelog with lfs code
281 # Wrap remotefilelog with lfs code
282 def _lfsloaded(loaded=False):
282 def _lfsloaded(loaded=False):
283 lfsmod = None
283 lfsmod = None
284 try:
284 try:
285 lfsmod = extensions.find('lfs')
285 lfsmod = extensions.find('lfs')
286 except KeyError:
286 except KeyError:
287 pass
287 pass
288 if lfsmod:
288 if lfsmod:
289 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
289 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
290 fileserverclient._lfsmod = lfsmod
290 fileserverclient._lfsmod = lfsmod
291 extensions.afterloaded('lfs', _lfsloaded)
291 extensions.afterloaded('lfs', _lfsloaded)
292
292
293 # debugdata needs remotefilelog.len to work
293 # debugdata needs remotefilelog.len to work
294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
295
295
296 changegroup.cgpacker = shallowbundle.shallowcg1packer
296 changegroup.cgpacker = shallowbundle.shallowcg1packer
297
297
298 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
298 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
299 shallowbundle.addchangegroupfiles)
299 shallowbundle.addchangegroupfiles)
300 extensions.wrapfunction(
300 extensions.wrapfunction(
301 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
301 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
302 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
302 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
303 extensions.wrapfunction(exchange, 'pull', exchangepull)
303 extensions.wrapfunction(exchange, 'pull', exchangepull)
304 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
304 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
305 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
305 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
306 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
306 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
307 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
307 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
308 extensions.wrapfunction(copies, '_computeforwardmissing',
308 extensions.wrapfunction(copies, '_computeforwardmissing',
309 computeforwardmissing)
309 computeforwardmissing)
310 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
310 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
311 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
311 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
312 extensions.wrapfunction(context.changectx, 'filectx', filectx)
312 extensions.wrapfunction(context.changectx, 'filectx', filectx)
313 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
313 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
314 extensions.wrapfunction(patch, 'trydiff', trydiff)
314 extensions.wrapfunction(patch, 'trydiff', trydiff)
315 extensions.wrapfunction(hg, 'verify', _verify)
315 extensions.wrapfunction(hg, 'verify', _verify)
316 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
316 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
317
317
318 # disappointing hacks below
318 # disappointing hacks below
319 scmutil.getrenamedfn = getrenamedfn
319 extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
320 extensions.wrapfunction(revset, 'filelog', filelogrevset)
320 extensions.wrapfunction(revset, 'filelog', filelogrevset)
321 revset.symbols['filelog'] = revset.filelog
321 revset.symbols['filelog'] = revset.filelog
322 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
322 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
323
323
324
324
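# A minimal sketch of the extensions.wrapfunction() convention used
# throughout this file (module and function names below are hypothetical):
# the wrapper receives the original callable as its first argument and can
# delegate to it, which is why this change gives the getrenamedfn()
# override below an 'orig' parameter.
#
#     def wrapped(orig, *args, **kwargs):
#         # pre-processing goes here
#         return orig(*args, **kwargs)
#     extensions.wrapfunction(somemodule, 'somefunc', wrapped)
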
def cloneshallow(orig, ui, repo, *args, **opts):
    if opts.get(r'shallow'):
        repos = []
        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)
        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts[r'includepattern'] = '\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    return orig()
            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)
        extensions.wrapfunction(
            streamclone, 'maybeperformlegacystreamclone', stream_wrap)

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if 'v2' in pullop.remotebundle2caps.get('stream', []):
                pullop.remotebundle2caps['stream'] = [
                    c for c in pullop.remotebundle2caps['stream']
                    if c != 'v2']
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements
        extensions.wrapfunction(
            streamclone, 'canperformstreamclone', canperformstreamclone)

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get(r'shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()

def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen

def reposetup(ui, repo):
    if not repo.local():
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
    ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool('remotefilelog', 'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError("Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)

def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)

def storewrapper(orig, requirements, path, vfstype):
    s = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        s = shallowstore.wrapstore(s)

    return s

# prefetch files before update
def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata,
                 labels=None):
    if isenabled(repo):
        manifest = mctx.manifest()
        files = []
        for f, args, msg in actions['g']:
            files.append((f, hex(manifest[f])))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files)
    return orig(repo, actions, wctx, mctx, overwrite, wantfiledata,
                labels=labels)

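# The prefetch wrappers above and below share one pattern: gather
# (path, hexnode) pairs and pass them to fileservice.prefetch() so that many
# files arrive in a single batched request instead of one network round-trip
# each. A minimal sketch, assuming 'ctx' is some changectx and 'paths' a
# list of files it contains:
#
#     pairs = [(f, hex(ctx.filenode(f))) for f in paths]
#     repo.fileservice.prefetch(pairs)
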
456
457 # Prefetch merge checkunknownfiles
457 # Prefetch merge checkunknownfiles
458 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
458 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
459 *args, **kwargs):
459 *args, **kwargs):
460 if isenabled(repo):
460 if isenabled(repo):
461 files = []
461 files = []
462 sparsematch = repo.maybesparsematch(mctx.rev())
462 sparsematch = repo.maybesparsematch(mctx.rev())
463 for f, (m, actionargs, msg) in actions.iteritems():
463 for f, (m, actionargs, msg) in actions.iteritems():
464 if sparsematch and not sparsematch(f):
464 if sparsematch and not sparsematch(f):
465 continue
465 continue
466 if m in ('c', 'dc', 'cm'):
466 if m in ('c', 'dc', 'cm'):
467 files.append((f, hex(mctx.filenode(f))))
467 files.append((f, hex(mctx.filenode(f))))
468 elif m == 'dg':
468 elif m == 'dg':
469 f2 = actionargs[0]
469 f2 = actionargs[0]
470 files.append((f2, hex(mctx.filenode(f2))))
470 files.append((f2, hex(mctx.filenode(f2))))
471 # batch fetch the needed files from the server
471 # batch fetch the needed files from the server
472 repo.fileservice.prefetch(files)
472 repo.fileservice.prefetch(files)
473 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
473 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
474
474
475 # Prefetch files before status attempts to look at their size and contents
475 # Prefetch files before status attempts to look at their size and contents
476 def checklookup(orig, self, files):
476 def checklookup(orig, self, files):
477 repo = self._repo
477 repo = self._repo
478 if isenabled(repo):
478 if isenabled(repo):
479 prefetchfiles = []
479 prefetchfiles = []
480 for parent in self._parents:
480 for parent in self._parents:
481 for f in files:
481 for f in files:
482 if f in parent:
482 if f in parent:
483 prefetchfiles.append((f, hex(parent.filenode(f))))
483 prefetchfiles.append((f, hex(parent.filenode(f))))
484 # batch fetch the needed files from the server
484 # batch fetch the needed files from the server
485 repo.fileservice.prefetch(prefetchfiles)
485 repo.fileservice.prefetch(prefetchfiles)
486 return orig(self, files)
486 return orig(self, files)
487
487
488 # Prefetch the logic that compares added and removed files for renames
488 # Prefetch the logic that compares added and removed files for renames
489 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
489 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
490 if isenabled(repo):
490 if isenabled(repo):
491 files = []
491 files = []
492 pmf = repo['.'].manifest()
492 pmf = repo['.'].manifest()
493 for f in removed:
493 for f in removed:
494 if f in pmf:
494 if f in pmf:
495 files.append((f, hex(pmf[f])))
495 files.append((f, hex(pmf[f])))
496 # batch fetch the needed files from the server
496 # batch fetch the needed files from the server
497 repo.fileservice.prefetch(files)
497 repo.fileservice.prefetch(files)
498 return orig(repo, matcher, added, removed, *args, **kwargs)
498 return orig(repo, matcher, added, removed, *args, **kwargs)
499
499
500 # prefetch files before pathcopies check
500 # prefetch files before pathcopies check
501 def computeforwardmissing(orig, a, b, match=None):
501 def computeforwardmissing(orig, a, b, match=None):
502 missing = orig(a, b, match=match)
502 missing = orig(a, b, match=match)
503 repo = a._repo
503 repo = a._repo
504 if isenabled(repo):
504 if isenabled(repo):
505 mb = b.manifest()
505 mb = b.manifest()
506
506
507 files = []
507 files = []
508 sparsematch = repo.maybesparsematch(b.rev())
508 sparsematch = repo.maybesparsematch(b.rev())
509 if sparsematch:
509 if sparsematch:
510 sparsemissing = set()
510 sparsemissing = set()
511 for f in missing:
511 for f in missing:
512 if sparsematch(f):
512 if sparsematch(f):
513 files.append((f, hex(mb[f])))
513 files.append((f, hex(mb[f])))
514 sparsemissing.add(f)
514 sparsemissing.add(f)
515 missing = sparsemissing
515 missing = sparsemissing
516
516
517 # batch fetch the needed files from the server
517 # batch fetch the needed files from the server
518 repo.fileservice.prefetch(files)
518 repo.fileservice.prefetch(files)
519 return missing
519 return missing
520
520
521 # close cache miss server connection after the command has finished
521 # close cache miss server connection after the command has finished
522 def runcommand(orig, lui, repo, *args, **kwargs):
522 def runcommand(orig, lui, repo, *args, **kwargs):
523 fileservice = None
523 fileservice = None
524 # repo can be None when running in chg:
524 # repo can be None when running in chg:
525 # - at startup, reposetup was called because serve is not norepo
525 # - at startup, reposetup was called because serve is not norepo
526 # - a norepo command like "help" is called
526 # - a norepo command like "help" is called
527 if repo and isenabled(repo):
527 if repo and isenabled(repo):
528 fileservice = repo.fileservice
528 fileservice = repo.fileservice
529 try:
529 try:
530 return orig(lui, repo, *args, **kwargs)
530 return orig(lui, repo, *args, **kwargs)
531 finally:
531 finally:
532 if fileservice:
532 if fileservice:
533 fileservice.close()
533 fileservice.close()
534
534
535 # prevent strip from stripping remotefilelogs
535 # prevent strip from stripping remotefilelogs
536 def _collectbrokencsets(orig, repo, files, striprev):
536 def _collectbrokencsets(orig, repo, files, striprev):
537 if isenabled(repo):
537 if isenabled(repo):
538 files = list([f for f in files if not repo.shallowmatch(f)])
538 files = list([f for f in files if not repo.shallowmatch(f)])
539 return orig(repo, files, striprev)
539 return orig(repo, files, striprev)
540
540
541 # changectx wrappers
541 # changectx wrappers
542 def filectx(orig, self, path, fileid=None, filelog=None):
542 def filectx(orig, self, path, fileid=None, filelog=None):
543 if fileid is None:
543 if fileid is None:
544 fileid = self.filenode(path)
544 fileid = self.filenode(path)
545 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
545 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
546 return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
546 return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
547 changectx=self, filelog=filelog)
547 changectx=self, filelog=filelog)
548 return orig(self, path, fileid=fileid, filelog=filelog)
548 return orig(self, path, fileid=fileid, filelog=filelog)
549
549
550 def workingfilectx(orig, self, path, filelog=None):
550 def workingfilectx(orig, self, path, filelog=None):
551 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
551 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
552 return remotefilectx.remoteworkingfilectx(self._repo, path,
552 return remotefilectx.remoteworkingfilectx(self._repo, path,
553 workingctx=self,
553 workingctx=self,
554 filelog=filelog)
554 filelog=filelog)
555 return orig(self, path, filelog=filelog)
555 return orig(self, path, filelog=filelog)
556
556
557 # prefetch required revisions before a diff
557 # prefetch required revisions before a diff
558 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
558 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
559 copy, getfilectx, *args, **kwargs):
559 copy, getfilectx, *args, **kwargs):
560 if isenabled(repo):
560 if isenabled(repo):
561 prefetch = []
561 prefetch = []
562 mf1 = ctx1.manifest()
562 mf1 = ctx1.manifest()
563 for fname in modified + added + removed:
563 for fname in modified + added + removed:
564 if fname in mf1:
564 if fname in mf1:
565 fnode = getfilectx(fname, ctx1).filenode()
565 fnode = getfilectx(fname, ctx1).filenode()
566 # fnode can be None if it's a edited working ctx file
566 # fnode can be None if it's a edited working ctx file
567 if fnode:
567 if fnode:
568 prefetch.append((fname, hex(fnode)))
568 prefetch.append((fname, hex(fnode)))
569 if fname not in removed:
569 if fname not in removed:
570 fnode = getfilectx(fname, ctx2).filenode()
570 fnode = getfilectx(fname, ctx2).filenode()
571 if fnode:
571 if fnode:
572 prefetch.append((fname, hex(fnode)))
572 prefetch.append((fname, hex(fnode)))
573
573
574 repo.fileservice.prefetch(prefetch)
574 repo.fileservice.prefetch(prefetch)
575
575
576 return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
576 return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
577 getfilectx, *args, **kwargs)
577 getfilectx, *args, **kwargs)
578
578
579 # Prevent verify from processing files
579 # Prevent verify from processing files
580 # a stub for mercurial.hg.verify()
580 # a stub for mercurial.hg.verify()
581 def _verify(orig, repo, level=None):
581 def _verify(orig, repo, level=None):
582 lock = repo.lock()
582 lock = repo.lock()
583 try:
583 try:
584 return shallowverifier.shallowverifier(repo).verify()
584 return shallowverifier.shallowverifier(repo).verify()
585 finally:
585 finally:
586 lock.release()
586 lock.release()
587
587
588
588
589 clientonetime = False
589 clientonetime = False
590 def onetimeclientsetup(ui):
590 def onetimeclientsetup(ui):
591 global clientonetime
591 global clientonetime
592 if clientonetime:
592 if clientonetime:
593 return
593 return
594 clientonetime = True
594 clientonetime = True
595
595
596 # Don't commit filelogs until we know the commit hash, since the hash
596 # Don't commit filelogs until we know the commit hash, since the hash
597 # is present in the filelog blob.
597 # is present in the filelog blob.
598 # This violates Mercurial's filelog->manifest->changelog write order,
598 # This violates Mercurial's filelog->manifest->changelog write order,
599 # but is generally fine for client repos.
599 # but is generally fine for client repos.
600 pendingfilecommits = []
600 pendingfilecommits = []
601 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
601 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
602 flags, cachedelta=None, _metatuple=None):
602 flags, cachedelta=None, _metatuple=None):
603 if isinstance(link, int):
603 if isinstance(link, int):
604 pendingfilecommits.append(
604 pendingfilecommits.append(
605 (self, rawtext, transaction, link, p1, p2, node, flags,
605 (self, rawtext, transaction, link, p1, p2, node, flags,
606 cachedelta, _metatuple))
606 cachedelta, _metatuple))
607 return node
607 return node
608 else:
608 else:
609 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
609 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
610 cachedelta, _metatuple=_metatuple)
610 cachedelta, _metatuple=_metatuple)
611 extensions.wrapfunction(
611 extensions.wrapfunction(
612 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
612 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
613
613
614 def changelogadd(orig, self, *args):
614 def changelogadd(orig, self, *args):
615 oldlen = len(self)
615 oldlen = len(self)
616 node = orig(self, *args)
616 node = orig(self, *args)
617 newlen = len(self)
617 newlen = len(self)
618 if oldlen != newlen:
618 if oldlen != newlen:
619 for oldargs in pendingfilecommits:
619 for oldargs in pendingfilecommits:
620 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
620 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
621 linknode = self.node(link)
621 linknode = self.node(link)
622 if linknode == node:
622 if linknode == node:
623 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
623 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
624 else:
624 else:
625 raise error.ProgrammingError(
625 raise error.ProgrammingError(
626 'pending multiple integer revisions are not supported')
626 'pending multiple integer revisions are not supported')
627 else:
627 else:
628 # "link" is actually wrong here (it is set to len(changelog))
628 # "link" is actually wrong here (it is set to len(changelog))
629 # if changelog remains unchanged, skip writing file revisions
629 # if changelog remains unchanged, skip writing file revisions
630 # but still do a sanity check about pending multiple revisions
630 # but still do a sanity check about pending multiple revisions
631 if len(set(x[3] for x in pendingfilecommits)) > 1:
631 if len(set(x[3] for x in pendingfilecommits)) > 1:
632 raise error.ProgrammingError(
632 raise error.ProgrammingError(
633 'pending multiple integer revisions are not supported')
633 'pending multiple integer revisions are not supported')
634 del pendingfilecommits[:]
634 del pendingfilecommits[:]
635 return node
635 return node
636 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
636 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
637
637
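# The net effect of the two wrappers above, sketched as the commit-time
# sequence of events (illustrative, not executable code):
#
#     filelog.addrawrevision(..., link=<int>)  # queued in pendingfilecommits
#     changelog.add(...)                       # the commit node becomes known
#     # the queue is flushed, writing each file revision with the real
#     # linknode instead of the provisional integer link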
-def getrenamedfn(repo, endrev=None):
+def getrenamedfn(orig, repo, endrev=None):
+    if not isenabled(repo):
+        return orig(repo, endrev)
+
    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed

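# Usage sketch for the hook wrapped above (path and rev are hypothetical):
# callers such as templates obtain the lookup function once and then query
# it per revision:
#
#     getrenamed = scmutil.getrenamedfn(repo)
#     source = getrenamed('path/to/file', rev)  # rename source, or a falsy
#                                               # value if fn was not renamed
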
def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError("Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo['.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % filename)
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted

def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])

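# Example invocation of the revset defined above, using standard hg syntax
# (the path is illustrative):
#
#     hg log -r 'filelog(mercurial/commands.py)'
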
@command('gc', [], _('hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
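    # Usage sketch (repository paths are illustrative):
    #
    #     hg gc                      # collect for $PWD plus the system cache
    #     hg gc /path/to/repo1 /path/to/repo2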
737 cachepaths = set()
740 cachepaths = set()
738
741
739 # get the system client cache
742 # get the system client cache
740 systemcache = shallowutil.getcachepath(ui, allowempty=True)
743 systemcache = shallowutil.getcachepath(ui, allowempty=True)
741 if systemcache:
744 if systemcache:
742 cachepaths.add(systemcache)
745 cachepaths.add(systemcache)
743
746
744 # get repo client and server cache
747 # get repo client and server cache
745 repopaths = []
748 repopaths = []
746 pwd = ui.environ.get('PWD')
749 pwd = ui.environ.get('PWD')
747 if pwd:
750 if pwd:
748 repopaths.append(pwd)
751 repopaths.append(pwd)
749
752
750 repopaths.extend(args)
753 repopaths.extend(args)
751 repos = []
754 repos = []
752 for repopath in repopaths:
755 for repopath in repopaths:
753 try:
756 try:
754 repo = hg.peer(ui, {}, repopath)
757 repo = hg.peer(ui, {}, repopath)
755 repos.append(repo)
758 repos.append(repo)
756
759
757 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
760 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
758 if repocache:
761 if repocache:
759 cachepaths.add(repocache)
762 cachepaths.add(repocache)
760 except error.RepoError:
763 except error.RepoError:
761 pass
764 pass
762
765
763 # gc client cache
766 # gc client cache
764 for cachepath in cachepaths:
767 for cachepath in cachepaths:
765 gcclient(ui, cachepath)
768 gcclient(ui, cachepath)
766
769
767 # gc server cache
770 # gc server cache
768 for repo in repos:
771 for repo in repos:
769 remotefilelogserver.gcserver(ui, repo._repo)
772 remotefilelogserver.gcserver(ui, repo._repo)
770
773
771 def gcclient(ui, cachepath):
774 def gcclient(ui, cachepath):
772 # get list of repos that use this cache
775 # get list of repos that use this cache
773 repospath = os.path.join(cachepath, 'repos')
776 repospath = os.path.join(cachepath, 'repos')
774 if not os.path.exists(repospath):
777 if not os.path.exists(repospath):
775 ui.warn(_("no known cache at %s\n") % cachepath)
778 ui.warn(_("no known cache at %s\n") % cachepath)
776 return
779 return
777
780
778 reposfile = open(repospath, 'rb')
781 reposfile = open(repospath, 'rb')
779 repos = {r[:-1] for r in reposfile.readlines()}
782 repos = {r[:-1] for r in reposfile.readlines()}
780 reposfile.close()
783 reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
                               total=len(repos))
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, 'name'):
            ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
        gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))
        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
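        # passing the keys collected so far as lastkeepkeys means keepkeys
        # accumulates the union of the keepsets of every repo sharing this
        # cache, so a key needed by any of them survives the gc below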

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, 'wb')
        reposfile.writelines([("%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_("warning: no valid repos in repofile\n"))

def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get(r'follow')
    revs = opts.get(r'rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts[r'removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not follow and not revs:
            match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(_("warning: file log can be slow on large repos - " +
                          "use -f to speed it up\n"))

    return orig(ui, repo, *pats, **opts)

def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is 14 days. If 'prefetchdays' is set to
    zero or a negative value, the date restriction is not applied.
    """
    days = ui.configint('remotefilelog', 'prefetchdays')
    if days > 0:
        revset = '(%s) & date(-%s)' % (revset, days)
    return revset

def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. Default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
    fname = repo.vfs.join('lastprefetch')

    ready = False
    with open(fname, 'a'):
        # the with construct above is used to avoid race conditions
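        # (opening in append mode also creates the lastprefetch file on
        # first use, so the getmtime() call below cannot fail)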
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Does a background repack if the backgroundrepack
    flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon():
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

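    # registered via _afterlock so the background prefetch only starts once
    # the lock held by the triggering command has been released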
    repo._afterlock(anon)

def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
        bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
        bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')

        if prefetchrevset:
            ui.status(_("prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo['.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result
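
# illustrative client configuration exercising the pull wrapper above
# (the revset value is only an example; any revset works):
#
#   [remotefilelog]
#   pullprefetch = bookmark(master)
#   backgroundprefetch = True
#   backgroundrepack = True
#
# judging from the wrapper, with these settings every `hg pull` is followed
# by a background prefetch of the configured revset, with a repack after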

def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
                       **kwargs):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
                    **kwargs)

    if util.safehasattr(remote, '_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        extensions.wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

def _fileprefetchhook(repo, revs, match):
    if isenabled(repo):
        allfiles = []
        for rev in revs:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)
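        # (path, hexnode) pairs are collected into one list first,
        # presumably so the file service can fetch them in a single batched
        # request rather than one round trip per file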

@command('debugremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelog first')),
    ], _('hg debugremotefilelog <path>'), norepo=True)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)

@command('verifyremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelogs first')),
    ], _('hg verifyremotefilelogs <directory>'), norepo=True)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)

@command('debugdatapack', [
    ('', 'long', None, _('print the long hashes')),
    ('', 'node', '', _('dump the contents of node'), 'NODE'),
    ], _('hg debugdatapack <paths>'), norepo=True)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)

@command('debughistorypack', [
    ], _('hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)

@command('debugkeepset', [
    ], _('hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
    repackmod.keepset(repo, keyfn)
    return

@command('debugwaitonrepack', [
    ], _('hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)

@command('debugwaitonprefetch', [
    ], _('hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)

def resolveprefetchopts(ui, opts):
    if not opts.get('rev'):
        revset = ['.', 'draft()']

        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
        if prefetchrevset:
            revset.append('(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append('(%s)' % bgprefetchrevs)
        revset = '+'.join(revset)
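        # illustrative result: with both configs set, revset becomes e.g.
        # '.+draft()+(<pullprefetch revset>)+(<bgprefetchrevs revset>)'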

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts['rev'] = [revset]

    if not opts.get('base'):
        opts['base'] = None

    return opts

@command('prefetch', [
    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
    ('', 'repack', False, _('run repack after prefetch')),
    ('b', 'base', '', _("rev that is assumed to already be local")),
    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default revset is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_("repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get('rev'))
    repo.prefetch(revs, opts.get('base'), pats, opts)

    # Run repack in background
    if opts.get('repack'):
        repackmod.backgroundrepack(repo, incremental=True)
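
# illustrative invocations of the prefetch command:
#   hg prefetch -r 'draft()'             # prefetch files for draft changesets
#   hg prefetch --repack subdir/file.c   # prefetch one file, then repack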

@command('repack', [
    ('', 'background', None, _('run in a background process'), None),
    ('', 'incremental', None, _('do an incremental repack'), None),
    ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
    ], _('hg repack [OPTIONS]'))
def repack_(ui, repo, *pats, **opts):
    if opts.get(r'background'):
        repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
                                   packsonly=opts.get(r'packsonly', False))
        return

    options = {'packsonly': opts.get(r'packsonly')}

    try:
        if opts.get(r'incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn('%s\n' % ex)
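
# illustrative invocations of the repack command:
#   hg repack                             # full repack in the foreground
#   hg repack --background --incremental  # incremental repack in a subprocess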