@@ -1,1302 +1,1303 b'' | |||||
1 | # __init__.py - remotefilelog extension |
|
1 | # __init__.py - remotefilelog extension | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) |
|
7 | """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL) | |
8 |
|
8 | |||
9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY |
|
9 | This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY | |
10 | GUARANTEES. This means that repositories created with this extension may |
|
10 | GUARANTEES. This means that repositories created with this extension may | |
11 | only be usable with the exact version of this extension/Mercurial that was |
|
11 | only be usable with the exact version of this extension/Mercurial that was | |
12 | used. The extension attempts to enforce this in order to prevent repository |
|
12 | used. The extension attempts to enforce this in order to prevent repository | |
13 | corruption. |
|
13 | corruption. | |
14 |
|
14 | |||
15 | remotefilelog works by fetching file contents lazily and storing them |
|
15 | remotefilelog works by fetching file contents lazily and storing them | |
16 | in a cache on the client rather than in revlogs. This allows enormous |
|
16 | in a cache on the client rather than in revlogs. This allows enormous | |
17 | histories to be transferred only partially, making them easier to |
|
17 | histories to be transferred only partially, making them easier to | |
18 | operate on. |
|
18 | operate on. | |
19 |
|
19 | |||
20 | Configs: |
|
20 | Configs: | |
21 |
|
21 | |||
22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files |
|
22 | ``packs.maxchainlen`` specifies the maximum delta chain length in pack files | |
23 |
|
23 | |||
24 | ``packs.maxpacksize`` specifies the maximum pack file size |
|
24 | ``packs.maxpacksize`` specifies the maximum pack file size | |
25 |
|
25 | |||
26 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the |
|
26 | ``packs.maxpackfilecount`` specifies the maximum number of packs in the | |
27 | shared cache (trees only for now) |
|
27 | shared cache (trees only for now) | |
28 |
|
28 | |||
29 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True |
|
29 | ``remotefilelog.backgroundprefetch`` runs prefetch in background when True | |
30 |
|
30 | |||
31 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and |
|
31 | ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and | |
32 | update, and on other commands that use them. Different from pullprefetch. |
|
32 | update, and on other commands that use them. Different from pullprefetch. | |
33 |
|
33 | |||
34 | ``remotefilelog.gcrepack`` does garbage collection during repack when True |
|
34 | ``remotefilelog.gcrepack`` does garbage collection during repack when True | |
35 |
|
35 | |||
36 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before |
|
36 | ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before | |
37 | it is garbage collected |
|
37 | it is garbage collected | |
38 |
|
38 | |||
39 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True |
|
39 | ``remotefilelog.repackonhggc`` runs repack on hg gc when True | |
40 |
|
40 | |||
41 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in |
|
41 | ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in | |
42 | days after which it is no longer prefetched. |
|
42 | days after which it is no longer prefetched. | |
43 |
|
43 | |||
44 | ``remotefilelog.prefetchdelay`` specifies delay between background |
|
44 | ``remotefilelog.prefetchdelay`` specifies delay between background | |
45 | prefetches in seconds after operations that change the working copy parent |
|
45 | prefetches in seconds after operations that change the working copy parent | |
46 |
|
46 | |||
47 | ``remotefilelog.data.gencountlimit`` constraints the minimum number of data |
|
47 | ``remotefilelog.data.gencountlimit`` constraints the minimum number of data | |
48 | pack files required to be considered part of a generation. In particular, |
|
48 | pack files required to be considered part of a generation. In particular, | |
49 | minimum number of packs files > gencountlimit. |
|
49 | minimum number of packs files > gencountlimit. | |
50 |
|
50 | |||
51 | ``remotefilelog.data.generations`` list for specifying the lower bound of |
|
51 | ``remotefilelog.data.generations`` list for specifying the lower bound of | |
52 | each generation of the data pack files. For example, list ['100MB','1MB'] |
|
52 | each generation of the data pack files. For example, list ['100MB','1MB'] | |
53 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [ |
|
53 | or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [ | |
54 | 1MB, 100MB) and [100MB, infinity). |
|
54 | 1MB, 100MB) and [100MB, infinity). | |
55 |
|
55 | |||
56 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to |
|
56 | ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to | |
57 | include in an incremental data repack. |
|
57 | include in an incremental data repack. | |
58 |
|
58 | |||
59 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for |
|
59 | ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for | |
60 | it to be considered for an incremental data repack. |
|
60 | it to be considered for an incremental data repack. | |
61 |
|
61 | |||
62 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files |
|
62 | ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files | |
63 | to include in an incremental data repack. |
|
63 | to include in an incremental data repack. | |
64 |
|
64 | |||
65 | ``remotefilelog.history.gencountlimit`` constraints the minimum number of |
|
65 | ``remotefilelog.history.gencountlimit`` constraints the minimum number of | |
66 | history pack files required to be considered part of a generation. In |
|
66 | history pack files required to be considered part of a generation. In | |
67 | particular, minimum number of packs files > gencountlimit. |
|
67 | particular, minimum number of packs files > gencountlimit. | |
68 |
|
68 | |||
69 | ``remotefilelog.history.generations`` list for specifying the lower bound of |
|
69 | ``remotefilelog.history.generations`` list for specifying the lower bound of | |
70 | each generation of the history pack files. For example, list [ |
|
70 | each generation of the history pack files. For example, list [ | |
71 | '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [ |
|
71 | '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [ | |
72 | 0, 1MB), [1MB, 100MB) and [100MB, infinity). |
|
72 | 0, 1MB), [1MB, 100MB) and [100MB, infinity). | |
73 |
|
73 | |||
74 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to |
|
74 | ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to | |
75 | include in an incremental history repack. |
|
75 | include in an incremental history repack. | |
76 |
|
76 | |||
77 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file |
|
77 | ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file | |
78 | for it to be considered for an incremental history repack. |
|
78 | for it to be considered for an incremental history repack. | |
79 |
|
79 | |||
80 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack |
|
80 | ``remotefilelog.history.repacksizelimit`` the maximum total size of pack | |
81 | files to include in an incremental history repack. |
|
81 | files to include in an incremental history repack. | |
82 |
|
82 | |||
83 | ``remotefilelog.backgroundrepack`` automatically consolidate packs in the |
|
83 | ``remotefilelog.backgroundrepack`` automatically consolidate packs in the | |
84 | background |
|
84 | background | |
85 |
|
85 | |||
86 | ``remotefilelog.cachepath`` path to cache |
|
86 | ``remotefilelog.cachepath`` path to cache | |
87 |
|
87 | |||
88 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this |
|
88 | ``remotefilelog.cachegroup`` if set, make cache directory sgid to this | |
89 | group |
|
89 | group | |
90 |
|
90 | |||
91 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data |
|
91 | ``remotefilelog.cacheprocess`` binary to invoke for fetching file data | |
92 |
|
92 | |||
93 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output |
|
93 | ``remotefilelog.debug`` turn on remotefilelog-specific debug output | |
94 |
|
94 | |||
95 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls |
|
95 | ``remotefilelog.excludepattern`` pattern of files to exclude from pulls | |
96 |
|
96 | |||
97 | ``remotefilelog.includepattern`` pattern of files to include in pulls |
|
97 | ``remotefilelog.includepattern`` pattern of files to include in pulls | |
98 |
|
98 | |||
99 | ``remotefilelog.fetchwarning``: message to print when too many |
|
99 | ``remotefilelog.fetchwarning``: message to print when too many | |
100 | single-file fetches occur |
|
100 | single-file fetches occur | |
101 |
|
101 | |||
102 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC |
|
102 | ``remotefilelog.getfilesstep`` number of files to request in a single RPC | |
103 |
|
103 | |||
104 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch |
|
104 | ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch | |
105 | files, otherwise use optimistic fetching |
|
105 | files, otherwise use optimistic fetching | |
106 |
|
106 | |||
107 | ``remotefilelog.pullprefetch`` revset for selecting files that should be |
|
107 | ``remotefilelog.pullprefetch`` revset for selecting files that should be | |
108 | eagerly downloaded rather than lazily |
|
108 | eagerly downloaded rather than lazily | |
109 |
|
109 | |||
110 | ``remotefilelog.reponame`` name of the repo. If set, used to partition |
|
110 | ``remotefilelog.reponame`` name of the repo. If set, used to partition | |
111 | data from other repos in a shared store. |
|
111 | data from other repos in a shared store. | |
112 |
|
112 | |||
113 | ``remotefilelog.server`` if true, enable server-side functionality |
|
113 | ``remotefilelog.server`` if true, enable server-side functionality | |
114 |
|
114 | |||
115 | ``remotefilelog.servercachepath`` path for caching blobs on the server |
|
115 | ``remotefilelog.servercachepath`` path for caching blobs on the server | |
116 |
|
116 | |||
117 | ``remotefilelog.serverexpiration`` number of days to keep cached server |
|
117 | ``remotefilelog.serverexpiration`` number of days to keep cached server | |
118 | blobs |
|
118 | blobs | |
119 |
|
119 | |||
120 | ``remotefilelog.validatecache`` if set, check cache entries for corruption |
|
120 | ``remotefilelog.validatecache`` if set, check cache entries for corruption | |
121 | before returning blobs |
|
121 | before returning blobs | |
122 |
|
122 | |||
123 | ``remotefilelog.validatecachelog`` if set, check cache entries for |
|
123 | ``remotefilelog.validatecachelog`` if set, check cache entries for | |
124 | corruption before returning metadata |
|
124 | corruption before returning metadata | |
125 |
|
125 | |||
126 | """ |
|
126 | """ | |
127 | from __future__ import absolute_import |
|
127 | from __future__ import absolute_import | |
128 |
|
128 | |||
129 | import os |
|
129 | import os | |
130 | import time |
|
130 | import time | |
131 | import traceback |
|
131 | import traceback | |
132 |
|
132 | |||
133 | from mercurial.node import hex |
|
133 | from mercurial.node import hex | |
134 | from mercurial.i18n import _ |
|
134 | from mercurial.i18n import _ | |
135 | from mercurial.pycompat import open |
|
135 | from mercurial.pycompat import open | |
136 | from mercurial import ( |
|
136 | from mercurial import ( | |
137 | changegroup, |
|
137 | changegroup, | |
138 | changelog, |
|
138 | changelog, | |
139 | cmdutil, |
|
139 | cmdutil, | |
140 | commands, |
|
140 | commands, | |
141 | configitems, |
|
141 | configitems, | |
142 | context, |
|
142 | context, | |
143 | copies, |
|
143 | copies, | |
144 | debugcommands as hgdebugcommands, |
|
144 | debugcommands as hgdebugcommands, | |
145 | dispatch, |
|
145 | dispatch, | |
146 | error, |
|
146 | error, | |
147 | exchange, |
|
147 | exchange, | |
148 | extensions, |
|
148 | extensions, | |
149 | hg, |
|
149 | hg, | |
150 | localrepo, |
|
150 | localrepo, | |
151 | match, |
|
151 | match, | |
152 | merge, |
|
152 | merge, | |
153 | node as nodemod, |
|
153 | node as nodemod, | |
154 | patch, |
|
154 | patch, | |
155 | pycompat, |
|
155 | pycompat, | |
156 | registrar, |
|
156 | registrar, | |
157 | repair, |
|
157 | repair, | |
158 | repoview, |
|
158 | repoview, | |
159 | revset, |
|
159 | revset, | |
160 | scmutil, |
|
160 | scmutil, | |
161 | smartset, |
|
161 | smartset, | |
162 | streamclone, |
|
162 | streamclone, | |
163 | util, |
|
163 | util, | |
164 | ) |
|
164 | ) | |
165 | from . import ( |
|
165 | from . import ( | |
166 | constants, |
|
166 | constants, | |
167 | debugcommands, |
|
167 | debugcommands, | |
168 | fileserverclient, |
|
168 | fileserverclient, | |
169 | remotefilectx, |
|
169 | remotefilectx, | |
170 | remotefilelog, |
|
170 | remotefilelog, | |
171 | remotefilelogserver, |
|
171 | remotefilelogserver, | |
172 | repack as repackmod, |
|
172 | repack as repackmod, | |
173 | shallowbundle, |
|
173 | shallowbundle, | |
174 | shallowrepo, |
|
174 | shallowrepo, | |
175 | shallowstore, |
|
175 | shallowstore, | |
176 | shallowutil, |
|
176 | shallowutil, | |
177 | shallowverifier, |
|
177 | shallowverifier, | |
178 | ) |
|
178 | ) | |
179 |
|
179 | |||
180 | # ensures debug commands are registered |
|
180 | # ensures debug commands are registered | |
181 | hgdebugcommands.command |
|
181 | hgdebugcommands.command | |
182 |
|
182 | |||
183 | cmdtable = {} |
|
183 | cmdtable = {} | |
184 | command = registrar.command(cmdtable) |
|
184 | command = registrar.command(cmdtable) | |
185 |
|
185 | |||
186 | configtable = {} |
|
186 | configtable = {} | |
187 | configitem = registrar.configitem(configtable) |
|
187 | configitem = registrar.configitem(configtable) | |
188 |
|
188 | |||
189 | configitem(b'remotefilelog', b'debug', default=False) |
|
189 | configitem(b'remotefilelog', b'debug', default=False) | |
190 |
|
190 | |||
191 | configitem(b'remotefilelog', b'reponame', default=b'') |
|
191 | configitem(b'remotefilelog', b'reponame', default=b'') | |
192 | configitem(b'remotefilelog', b'cachepath', default=None) |
|
192 | configitem(b'remotefilelog', b'cachepath', default=None) | |
193 | configitem(b'remotefilelog', b'cachegroup', default=None) |
|
193 | configitem(b'remotefilelog', b'cachegroup', default=None) | |
194 | configitem(b'remotefilelog', b'cacheprocess', default=None) |
|
194 | configitem(b'remotefilelog', b'cacheprocess', default=None) | |
195 | configitem(b'remotefilelog', b'cacheprocess.includepath', default=None) |
|
195 | configitem(b'remotefilelog', b'cacheprocess.includepath', default=None) | |
196 | configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB") |
|
196 | configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB") | |
197 |
|
197 | |||
198 | configitem( |
|
198 | configitem( | |
199 | b'remotefilelog', |
|
199 | b'remotefilelog', | |
200 | b'fallbackpath', |
|
200 | b'fallbackpath', | |
201 | default=configitems.dynamicdefault, |
|
201 | default=configitems.dynamicdefault, | |
202 | alias=[(b'remotefilelog', b'fallbackrepo')], |
|
202 | alias=[(b'remotefilelog', b'fallbackrepo')], | |
203 | ) |
|
203 | ) | |
204 |
|
204 | |||
205 | configitem(b'remotefilelog', b'validatecachelog', default=None) |
|
205 | configitem(b'remotefilelog', b'validatecachelog', default=None) | |
206 | configitem(b'remotefilelog', b'validatecache', default=b'on') |
|
206 | configitem(b'remotefilelog', b'validatecache', default=b'on') | |
207 | configitem(b'remotefilelog', b'server', default=None) |
|
207 | configitem(b'remotefilelog', b'server', default=None) | |
208 | configitem(b'remotefilelog', b'servercachepath', default=None) |
|
208 | configitem(b'remotefilelog', b'servercachepath', default=None) | |
209 | configitem(b"remotefilelog", b"serverexpiration", default=30) |
|
209 | configitem(b"remotefilelog", b"serverexpiration", default=30) | |
210 | configitem(b'remotefilelog', b'backgroundrepack', default=False) |
|
210 | configitem(b'remotefilelog', b'backgroundrepack', default=False) | |
211 | configitem(b'remotefilelog', b'bgprefetchrevs', default=None) |
|
211 | configitem(b'remotefilelog', b'bgprefetchrevs', default=None) | |
212 | configitem(b'remotefilelog', b'pullprefetch', default=None) |
|
212 | configitem(b'remotefilelog', b'pullprefetch', default=None) | |
213 | configitem(b'remotefilelog', b'backgroundprefetch', default=False) |
|
213 | configitem(b'remotefilelog', b'backgroundprefetch', default=False) | |
214 | configitem(b'remotefilelog', b'prefetchdelay', default=120) |
|
214 | configitem(b'remotefilelog', b'prefetchdelay', default=120) | |
215 | configitem(b'remotefilelog', b'prefetchdays', default=14) |
|
215 | configitem(b'remotefilelog', b'prefetchdays', default=14) | |
216 |
|
216 | |||
217 | configitem(b'remotefilelog', b'getfilesstep', default=10000) |
|
217 | configitem(b'remotefilelog', b'getfilesstep', default=10000) | |
218 | configitem(b'remotefilelog', b'getfilestype', default=b'optimistic') |
|
218 | configitem(b'remotefilelog', b'getfilestype', default=b'optimistic') | |
219 | configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault) |
|
219 | configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault) | |
220 | configitem(b'remotefilelog', b'fetchwarning', default=b'') |
|
220 | configitem(b'remotefilelog', b'fetchwarning', default=b'') | |
221 |
|
221 | |||
222 | configitem(b'remotefilelog', b'includepattern', default=None) |
|
222 | configitem(b'remotefilelog', b'includepattern', default=None) | |
223 | configitem(b'remotefilelog', b'excludepattern', default=None) |
|
223 | configitem(b'remotefilelog', b'excludepattern', default=None) | |
224 |
|
224 | |||
225 | configitem(b'remotefilelog', b'gcrepack', default=False) |
|
225 | configitem(b'remotefilelog', b'gcrepack', default=False) | |
226 | configitem(b'remotefilelog', b'repackonhggc', default=False) |
|
226 | configitem(b'remotefilelog', b'repackonhggc', default=False) | |
227 | configitem(b'repack', b'chainorphansbysize', default=True, experimental=True) |
|
227 | configitem(b'repack', b'chainorphansbysize', default=True, experimental=True) | |
228 |
|
228 | |||
229 | configitem(b'packs', b'maxpacksize', default=0) |
|
229 | configitem(b'packs', b'maxpacksize', default=0) | |
230 | configitem(b'packs', b'maxchainlen', default=1000) |
|
230 | configitem(b'packs', b'maxchainlen', default=1000) | |
231 |
|
231 | |||
232 | configitem(b'devel', b'remotefilelog.ensurestart', default=False) |
|
232 | configitem(b'devel', b'remotefilelog.ensurestart', default=False) | |
|
233 | configitem(b'devel', b'remotefilelog.bg-wait', default=False) | |||
233 |
|
234 | |||
234 | # default TTL limit is 30 days |
|
235 | # default TTL limit is 30 days | |
235 | _defaultlimit = 60 * 60 * 24 * 30 |
|
236 | _defaultlimit = 60 * 60 * 24 * 30 | |
236 | configitem(b'remotefilelog', b'nodettl', default=_defaultlimit) |
|
237 | configitem(b'remotefilelog', b'nodettl', default=_defaultlimit) | |
237 |
|
238 | |||
238 | configitem(b'remotefilelog', b'data.gencountlimit', default=2), |
|
239 | configitem(b'remotefilelog', b'data.gencountlimit', default=2), | |
239 | configitem( |
|
240 | configitem( | |
240 | b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB'] |
|
241 | b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB'] | |
241 | ) |
|
242 | ) | |
242 | configitem(b'remotefilelog', b'data.maxrepackpacks', default=50) |
|
243 | configitem(b'remotefilelog', b'data.maxrepackpacks', default=50) | |
243 | configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB') |
|
244 | configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB') | |
244 | configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB') |
|
245 | configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB') | |
245 |
|
246 | |||
246 | configitem(b'remotefilelog', b'history.gencountlimit', default=2), |
|
247 | configitem(b'remotefilelog', b'history.gencountlimit', default=2), | |
247 | configitem(b'remotefilelog', b'history.generations', default=[b'100MB']) |
|
248 | configitem(b'remotefilelog', b'history.generations', default=[b'100MB']) | |
248 | configitem(b'remotefilelog', b'history.maxrepackpacks', default=50) |
|
249 | configitem(b'remotefilelog', b'history.maxrepackpacks', default=50) | |
249 | configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB') |
|
250 | configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB') | |
250 | configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB') |
|
251 | configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB') | |
251 |
|
252 | |||
252 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
253 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for | |
253 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
254 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should | |
254 | # be specifying the version(s) of Mercurial they are tested with, or |
|
255 | # be specifying the version(s) of Mercurial they are tested with, or | |
255 | # leave the attribute unspecified. |
|
256 | # leave the attribute unspecified. | |
256 | testedwith = b'ships-with-hg-core' |
|
257 | testedwith = b'ships-with-hg-core' | |
257 |
|
258 | |||
258 | repoclass = localrepo.localrepository |
|
259 | repoclass = localrepo.localrepository | |
259 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) |
|
260 | repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT) | |
260 |
|
261 | |||
261 | isenabled = shallowutil.isenabled |
|
262 | isenabled = shallowutil.isenabled | |
262 |
|
263 | |||
263 |
|
264 | |||
264 | def uisetup(ui): |
|
265 | def uisetup(ui): | |
265 | """Wraps user facing Mercurial commands to swap them out with shallow |
|
266 | """Wraps user facing Mercurial commands to swap them out with shallow | |
266 | versions. |
|
267 | versions. | |
267 | """ |
|
268 | """ | |
268 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) |
|
269 | hg.wirepeersetupfuncs.append(fileserverclient.peersetup) | |
269 |
|
270 | |||
270 | entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow) |
|
271 | entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow) | |
271 | entry[1].append( |
|
272 | entry[1].append( | |
272 | ( |
|
273 | ( | |
273 | b'', |
|
274 | b'', | |
274 | b'shallow', |
|
275 | b'shallow', | |
275 | None, |
|
276 | None, | |
276 | _(b"create a shallow clone which uses remote file history"), |
|
277 | _(b"create a shallow clone which uses remote file history"), | |
277 | ) |
|
278 | ) | |
278 | ) |
|
279 | ) | |
279 |
|
280 | |||
280 | extensions.wrapcommand( |
|
281 | extensions.wrapcommand( | |
281 | commands.table, b'debugindex', debugcommands.debugindex |
|
282 | commands.table, b'debugindex', debugcommands.debugindex | |
282 | ) |
|
283 | ) | |
283 | extensions.wrapcommand( |
|
284 | extensions.wrapcommand( | |
284 | commands.table, b'debugindexdot', debugcommands.debugindexdot |
|
285 | commands.table, b'debugindexdot', debugcommands.debugindexdot | |
285 | ) |
|
286 | ) | |
286 | extensions.wrapcommand(commands.table, b'log', log) |
|
287 | extensions.wrapcommand(commands.table, b'log', log) | |
287 | extensions.wrapcommand(commands.table, b'pull', pull) |
|
288 | extensions.wrapcommand(commands.table, b'pull', pull) | |
288 |
|
289 | |||
289 | # Prevent 'hg manifest --all' |
|
290 | # Prevent 'hg manifest --all' | |
290 | def _manifest(orig, ui, repo, *args, **opts): |
|
291 | def _manifest(orig, ui, repo, *args, **opts): | |
291 | if isenabled(repo) and opts.get(r'all'): |
|
292 | if isenabled(repo) and opts.get(r'all'): | |
292 | raise error.Abort(_(b"--all is not supported in a shallow repo")) |
|
293 | raise error.Abort(_(b"--all is not supported in a shallow repo")) | |
293 |
|
294 | |||
294 | return orig(ui, repo, *args, **opts) |
|
295 | return orig(ui, repo, *args, **opts) | |
295 |
|
296 | |||
296 | extensions.wrapcommand(commands.table, b"manifest", _manifest) |
|
297 | extensions.wrapcommand(commands.table, b"manifest", _manifest) | |
297 |
|
298 | |||
298 | # Wrap remotefilelog with lfs code |
|
299 | # Wrap remotefilelog with lfs code | |
299 | def _lfsloaded(loaded=False): |
|
300 | def _lfsloaded(loaded=False): | |
300 | lfsmod = None |
|
301 | lfsmod = None | |
301 | try: |
|
302 | try: | |
302 | lfsmod = extensions.find(b'lfs') |
|
303 | lfsmod = extensions.find(b'lfs') | |
303 | except KeyError: |
|
304 | except KeyError: | |
304 | pass |
|
305 | pass | |
305 | if lfsmod: |
|
306 | if lfsmod: | |
306 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) |
|
307 | lfsmod.wrapfilelog(remotefilelog.remotefilelog) | |
307 | fileserverclient._lfsmod = lfsmod |
|
308 | fileserverclient._lfsmod = lfsmod | |
308 |
|
309 | |||
309 | extensions.afterloaded(b'lfs', _lfsloaded) |
|
310 | extensions.afterloaded(b'lfs', _lfsloaded) | |
310 |
|
311 | |||
311 | # debugdata needs remotefilelog.len to work |
|
312 | # debugdata needs remotefilelog.len to work | |
312 | extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow) |
|
313 | extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow) | |
313 |
|
314 | |||
314 | changegroup.cgpacker = shallowbundle.shallowcg1packer |
|
315 | changegroup.cgpacker = shallowbundle.shallowcg1packer | |
315 |
|
316 | |||
316 | extensions.wrapfunction( |
|
317 | extensions.wrapfunction( | |
317 | changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles |
|
318 | changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles | |
318 | ) |
|
319 | ) | |
319 | extensions.wrapfunction( |
|
320 | extensions.wrapfunction( | |
320 | changegroup, b'makechangegroup', shallowbundle.makechangegroup |
|
321 | changegroup, b'makechangegroup', shallowbundle.makechangegroup | |
321 | ) |
|
322 | ) | |
322 | extensions.wrapfunction(localrepo, b'makestore', storewrapper) |
|
323 | extensions.wrapfunction(localrepo, b'makestore', storewrapper) | |
323 | extensions.wrapfunction(exchange, b'pull', exchangepull) |
|
324 | extensions.wrapfunction(exchange, b'pull', exchangepull) | |
324 | extensions.wrapfunction(merge, b'applyupdates', applyupdates) |
|
325 | extensions.wrapfunction(merge, b'applyupdates', applyupdates) | |
325 | extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles) |
|
326 | extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles) | |
326 | extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup) |
|
327 | extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup) | |
327 | extensions.wrapfunction(scmutil, b'_findrenames', findrenames) |
|
328 | extensions.wrapfunction(scmutil, b'_findrenames', findrenames) | |
328 | extensions.wrapfunction( |
|
329 | extensions.wrapfunction( | |
329 | copies, b'_computeforwardmissing', computeforwardmissing |
|
330 | copies, b'_computeforwardmissing', computeforwardmissing | |
330 | ) |
|
331 | ) | |
331 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) |
|
332 | extensions.wrapfunction(dispatch, b'runcommand', runcommand) | |
332 | extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets) |
|
333 | extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets) | |
333 | extensions.wrapfunction(context.changectx, b'filectx', filectx) |
|
334 | extensions.wrapfunction(context.changectx, b'filectx', filectx) | |
334 | extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx) |
|
335 | extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx) | |
335 | extensions.wrapfunction(patch, b'trydiff', trydiff) |
|
336 | extensions.wrapfunction(patch, b'trydiff', trydiff) | |
336 | extensions.wrapfunction(hg, b'verify', _verify) |
|
337 | extensions.wrapfunction(hg, b'verify', _verify) | |
337 | scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook) |
|
338 | scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook) | |
338 |
|
339 | |||
339 | # disappointing hacks below |
|
340 | # disappointing hacks below | |
340 | extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn) |
|
341 | extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn) | |
341 | extensions.wrapfunction(revset, b'filelog', filelogrevset) |
|
342 | extensions.wrapfunction(revset, b'filelog', filelogrevset) | |
342 | revset.symbols[b'filelog'] = revset.filelog |
|
343 | revset.symbols[b'filelog'] = revset.filelog | |
343 | extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs) |
|
344 | extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs) | |
344 |
|
345 | |||
345 |
|
346 | |||
346 | def cloneshallow(orig, ui, repo, *args, **opts): |
|
347 | def cloneshallow(orig, ui, repo, *args, **opts): | |
347 | if opts.get(r'shallow'): |
|
348 | if opts.get(r'shallow'): | |
348 | repos = [] |
|
349 | repos = [] | |
349 |
|
350 | |||
350 | def pull_shallow(orig, self, *args, **kwargs): |
|
351 | def pull_shallow(orig, self, *args, **kwargs): | |
351 | if not isenabled(self): |
|
352 | if not isenabled(self): | |
352 | repos.append(self.unfiltered()) |
|
353 | repos.append(self.unfiltered()) | |
353 | # set up the client hooks so the post-clone update works |
|
354 | # set up the client hooks so the post-clone update works | |
354 | setupclient(self.ui, self.unfiltered()) |
|
355 | setupclient(self.ui, self.unfiltered()) | |
355 |
|
356 | |||
356 | # setupclient fixed the class on the repo itself |
|
357 | # setupclient fixed the class on the repo itself | |
357 | # but we also need to fix it on the repoview |
|
358 | # but we also need to fix it on the repoview | |
358 | if isinstance(self, repoview.repoview): |
|
359 | if isinstance(self, repoview.repoview): | |
359 | self.__class__.__bases__ = ( |
|
360 | self.__class__.__bases__ = ( | |
360 | self.__class__.__bases__[0], |
|
361 | self.__class__.__bases__[0], | |
361 | self.unfiltered().__class__, |
|
362 | self.unfiltered().__class__, | |
362 | ) |
|
363 | ) | |
363 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
364 | self.requirements.add(constants.SHALLOWREPO_REQUIREMENT) | |
364 | self._writerequirements() |
|
365 | self._writerequirements() | |
365 |
|
366 | |||
366 | # Since setupclient hadn't been called, exchange.pull was not |
|
367 | # Since setupclient hadn't been called, exchange.pull was not | |
367 | # wrapped. So we need to manually invoke our version of it. |
|
368 | # wrapped. So we need to manually invoke our version of it. | |
368 | return exchangepull(orig, self, *args, **kwargs) |
|
369 | return exchangepull(orig, self, *args, **kwargs) | |
369 | else: |
|
370 | else: | |
370 | return orig(self, *args, **kwargs) |
|
371 | return orig(self, *args, **kwargs) | |
371 |
|
372 | |||
372 | extensions.wrapfunction(exchange, b'pull', pull_shallow) |
|
373 | extensions.wrapfunction(exchange, b'pull', pull_shallow) | |
373 |
|
374 | |||
374 | # Wrap the stream logic to add requirements and to pass include/exclude |
|
375 | # Wrap the stream logic to add requirements and to pass include/exclude | |
375 | # patterns around. |
|
376 | # patterns around. | |
376 | def setup_streamout(repo, remote): |
|
377 | def setup_streamout(repo, remote): | |
377 | # Replace remote.stream_out with a version that sends file |
|
378 | # Replace remote.stream_out with a version that sends file | |
378 | # patterns. |
|
379 | # patterns. | |
379 | def stream_out_shallow(orig): |
|
380 | def stream_out_shallow(orig): | |
380 | caps = remote.capabilities() |
|
381 | caps = remote.capabilities() | |
381 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: |
|
382 | if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps: | |
382 | opts = {} |
|
383 | opts = {} | |
383 | if repo.includepattern: |
|
384 | if repo.includepattern: | |
384 | opts[r'includepattern'] = b'\0'.join( |
|
385 | opts[r'includepattern'] = b'\0'.join( | |
385 | repo.includepattern |
|
386 | repo.includepattern | |
386 | ) |
|
387 | ) | |
387 | if repo.excludepattern: |
|
388 | if repo.excludepattern: | |
388 | opts[r'excludepattern'] = b'\0'.join( |
|
389 | opts[r'excludepattern'] = b'\0'.join( | |
389 | repo.excludepattern |
|
390 | repo.excludepattern | |
390 | ) |
|
391 | ) | |
391 | return remote._callstream(b'stream_out_shallow', **opts) |
|
392 | return remote._callstream(b'stream_out_shallow', **opts) | |
392 | else: |
|
393 | else: | |
393 | return orig() |
|
394 | return orig() | |
394 |
|
395 | |||
395 | extensions.wrapfunction(remote, b'stream_out', stream_out_shallow) |
|
396 | extensions.wrapfunction(remote, b'stream_out', stream_out_shallow) | |
396 |
|
397 | |||
397 | def stream_wrap(orig, op): |
|
398 | def stream_wrap(orig, op): | |
398 | setup_streamout(op.repo, op.remote) |
|
399 | setup_streamout(op.repo, op.remote) | |
399 | return orig(op) |
|
400 | return orig(op) | |
400 |
|
401 | |||
401 | extensions.wrapfunction( |
|
402 | extensions.wrapfunction( | |
402 | streamclone, b'maybeperformlegacystreamclone', stream_wrap |
|
403 | streamclone, b'maybeperformlegacystreamclone', stream_wrap | |
403 | ) |
|
404 | ) | |
404 |
|
405 | |||
405 | def canperformstreamclone(orig, pullop, bundle2=False): |
|
406 | def canperformstreamclone(orig, pullop, bundle2=False): | |
406 | # remotefilelog is currently incompatible with the |
|
407 | # remotefilelog is currently incompatible with the | |
407 | # bundle2 flavor of streamclones, so force us to use |
|
408 | # bundle2 flavor of streamclones, so force us to use | |
408 | # v1 instead. |
|
409 | # v1 instead. | |
409 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): |
|
410 | if b'v2' in pullop.remotebundle2caps.get(b'stream', []): | |
410 | pullop.remotebundle2caps[b'stream'] = [ |
|
411 | pullop.remotebundle2caps[b'stream'] = [ | |
411 | c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2' |
|
412 | c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2' | |
412 | ] |
|
413 | ] | |
413 | if bundle2: |
|
414 | if bundle2: | |
414 | return False, None |
|
415 | return False, None | |
415 | supported, requirements = orig(pullop, bundle2=bundle2) |
|
416 | supported, requirements = orig(pullop, bundle2=bundle2) | |
416 | if requirements is not None: |
|
417 | if requirements is not None: | |
417 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) |
|
418 | requirements.add(constants.SHALLOWREPO_REQUIREMENT) | |
418 | return supported, requirements |
|
419 | return supported, requirements | |
419 |
|
420 | |||
420 | extensions.wrapfunction( |
|
421 | extensions.wrapfunction( | |
421 | streamclone, b'canperformstreamclone', canperformstreamclone |
|
422 | streamclone, b'canperformstreamclone', canperformstreamclone | |
422 | ) |
|
423 | ) | |
423 |
|
424 | |||
424 | try: |
|
425 | try: | |
425 | orig(ui, repo, *args, **opts) |
|
426 | orig(ui, repo, *args, **opts) | |
426 | finally: |
|
427 | finally: | |
427 | if opts.get(r'shallow'): |
|
428 | if opts.get(r'shallow'): | |
428 | for r in repos: |
|
429 | for r in repos: | |
429 | if util.safehasattr(r, b'fileservice'): |
|
430 | if util.safehasattr(r, b'fileservice'): | |
430 | r.fileservice.close() |
|
431 | r.fileservice.close() | |
431 |
|
432 | |||
432 |
|
433 | |||
433 | def debugdatashallow(orig, *args, **kwds): |
|
434 | def debugdatashallow(orig, *args, **kwds): | |
434 | oldlen = remotefilelog.remotefilelog.__len__ |
|
435 | oldlen = remotefilelog.remotefilelog.__len__ | |
435 | try: |
|
436 | try: | |
436 | remotefilelog.remotefilelog.__len__ = lambda x: 1 |
|
437 | remotefilelog.remotefilelog.__len__ = lambda x: 1 | |
437 | return orig(*args, **kwds) |
|
438 | return orig(*args, **kwds) | |
438 | finally: |
|
439 | finally: | |
439 | remotefilelog.remotefilelog.__len__ = oldlen |
|
440 | remotefilelog.remotefilelog.__len__ = oldlen | |
440 |
|
441 | |||
441 |
|
442 | |||
442 | def reposetup(ui, repo): |
|
443 | def reposetup(ui, repo): | |
443 | if not repo.local(): |
|
444 | if not repo.local(): | |
444 | return |
|
445 | return | |
445 |
|
446 | |||
446 | # put here intentionally bc doesnt work in uisetup |
|
447 | # put here intentionally bc doesnt work in uisetup | |
447 | ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch) |
|
448 | ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch) | |
448 | ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch) |
|
449 | ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch) | |
449 |
|
450 | |||
450 | isserverenabled = ui.configbool(b'remotefilelog', b'server') |
|
451 | isserverenabled = ui.configbool(b'remotefilelog', b'server') | |
451 | isshallowclient = isenabled(repo) |
|
452 | isshallowclient = isenabled(repo) | |
452 |
|
453 | |||
453 | if isserverenabled and isshallowclient: |
|
454 | if isserverenabled and isshallowclient: | |
454 | raise RuntimeError(b"Cannot be both a server and shallow client.") |
|
455 | raise RuntimeError(b"Cannot be both a server and shallow client.") | |
455 |
|
456 | |||
456 | if isshallowclient: |
|
457 | if isshallowclient: | |
457 | setupclient(ui, repo) |
|
458 | setupclient(ui, repo) | |
458 |
|
459 | |||
459 | if isserverenabled: |
|
460 | if isserverenabled: | |
460 | remotefilelogserver.setupserver(ui, repo) |
|
461 | remotefilelogserver.setupserver(ui, repo) | |
461 |
|
462 | |||
462 |
|
463 | |||
463 | def setupclient(ui, repo): |
|
464 | def setupclient(ui, repo): | |
464 | if not isinstance(repo, localrepo.localrepository): |
|
465 | if not isinstance(repo, localrepo.localrepository): | |
465 | return |
|
466 | return | |
466 |
|
467 | |||
467 | # Even clients get the server setup since they need to have the |
|
468 | # Even clients get the server setup since they need to have the | |
468 | # wireprotocol endpoints registered. |
|
469 | # wireprotocol endpoints registered. | |
469 | remotefilelogserver.onetimesetup(ui) |
|
470 | remotefilelogserver.onetimesetup(ui) | |
470 | onetimeclientsetup(ui) |
|
471 | onetimeclientsetup(ui) | |
471 |
|
472 | |||
472 | shallowrepo.wraprepo(repo) |
|
473 | shallowrepo.wraprepo(repo) | |
473 | repo.store = shallowstore.wrapstore(repo.store) |
|
474 | repo.store = shallowstore.wrapstore(repo.store) | |
474 |
|
475 | |||
475 |
|
476 | |||
476 | def storewrapper(orig, requirements, path, vfstype): |
|
477 | def storewrapper(orig, requirements, path, vfstype): | |
477 | s = orig(requirements, path, vfstype) |
|
478 | s = orig(requirements, path, vfstype) | |
478 | if constants.SHALLOWREPO_REQUIREMENT in requirements: |
|
479 | if constants.SHALLOWREPO_REQUIREMENT in requirements: | |
479 | s = shallowstore.wrapstore(s) |
|
480 | s = shallowstore.wrapstore(s) | |
480 |
|
481 | |||
481 | return s |
|
482 | return s | |
482 |
|
483 | |||
483 |
|
484 | |||
484 | # prefetch files before update |
|
485 | # prefetch files before update | |
485 | def applyupdates( |
|
486 | def applyupdates( | |
486 | orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None |
|
487 | orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None | |
487 | ): |
|
488 | ): | |
488 | if isenabled(repo): |
|
489 | if isenabled(repo): | |
489 | manifest = mctx.manifest() |
|
490 | manifest = mctx.manifest() | |
490 | files = [] |
|
491 | files = [] | |
491 | for f, args, msg in actions[b'g']: |
|
492 | for f, args, msg in actions[b'g']: | |
492 | files.append((f, hex(manifest[f]))) |
|
493 | files.append((f, hex(manifest[f]))) | |
493 | # batch fetch the needed files from the server |
|
494 | # batch fetch the needed files from the server | |
494 | repo.fileservice.prefetch(files) |
|
495 | repo.fileservice.prefetch(files) | |
495 | return orig( |
|
496 | return orig( | |
496 | repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels |
|
497 | repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels | |
497 | ) |
|
498 | ) | |
498 |
|
499 | |||
499 |
|
500 | |||
500 | # Prefetch merge checkunknownfiles |
|
501 | # Prefetch merge checkunknownfiles | |
501 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs): |
|
502 | def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs): | |
502 | if isenabled(repo): |
|
503 | if isenabled(repo): | |
503 | files = [] |
|
504 | files = [] | |
504 | sparsematch = repo.maybesparsematch(mctx.rev()) |
|
505 | sparsematch = repo.maybesparsematch(mctx.rev()) | |
505 | for f, (m, actionargs, msg) in pycompat.iteritems(actions): |
|
506 | for f, (m, actionargs, msg) in pycompat.iteritems(actions): | |
506 | if sparsematch and not sparsematch(f): |
|
507 | if sparsematch and not sparsematch(f): | |
507 | continue |
|
508 | continue | |
508 | if m in (b'c', b'dc', b'cm'): |
|
509 | if m in (b'c', b'dc', b'cm'): | |
509 | files.append((f, hex(mctx.filenode(f)))) |
|
510 | files.append((f, hex(mctx.filenode(f)))) | |
510 | elif m == b'dg': |
|
511 | elif m == b'dg': | |
511 | f2 = actionargs[0] |
|
512 | f2 = actionargs[0] | |
512 | files.append((f2, hex(mctx.filenode(f2)))) |
|
513 | files.append((f2, hex(mctx.filenode(f2)))) | |
513 | # batch fetch the needed files from the server |
|
514 | # batch fetch the needed files from the server | |
514 | repo.fileservice.prefetch(files) |
|
515 | repo.fileservice.prefetch(files) | |
515 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) |
|
516 | return orig(repo, wctx, mctx, force, actions, *args, **kwargs) | |
516 |
|
517 | |||
517 |
|
518 | |||
518 | # Prefetch files before status attempts to look at their size and contents |
|
519 | # Prefetch files before status attempts to look at their size and contents | |
519 | def checklookup(orig, self, files): |
|
520 | def checklookup(orig, self, files): | |
520 | repo = self._repo |
|
521 | repo = self._repo | |
521 | if isenabled(repo): |
|
522 | if isenabled(repo): | |
522 | prefetchfiles = [] |
|
523 | prefetchfiles = [] | |
523 | for parent in self._parents: |
|
524 | for parent in self._parents: | |
524 | for f in files: |
|
525 | for f in files: | |
525 | if f in parent: |
|
526 | if f in parent: | |
526 | prefetchfiles.append((f, hex(parent.filenode(f)))) |
|
527 | prefetchfiles.append((f, hex(parent.filenode(f)))) | |
527 | # batch fetch the needed files from the server |
|
528 | # batch fetch the needed files from the server | |
528 | repo.fileservice.prefetch(prefetchfiles) |
|
529 | repo.fileservice.prefetch(prefetchfiles) | |
529 | return orig(self, files) |
|
530 | return orig(self, files) | |
530 |
|
531 | |||
531 |
|
532 | |||
532 | # Prefetch the logic that compares added and removed files for renames |
|
533 | # Prefetch the logic that compares added and removed files for renames | |
533 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): |
|
534 | def findrenames(orig, repo, matcher, added, removed, *args, **kwargs): | |
534 | if isenabled(repo): |
|
535 | if isenabled(repo): | |
535 | files = [] |
|
536 | files = [] | |
536 | pmf = repo[b'.'].manifest() |
|
537 | pmf = repo[b'.'].manifest() | |
537 | for f in removed: |
|
538 | for f in removed: | |
538 | if f in pmf: |
|
539 | if f in pmf: | |
539 | files.append((f, hex(pmf[f]))) |
|
540 | files.append((f, hex(pmf[f]))) | |
540 | # batch fetch the needed files from the server |
|
541 | # batch fetch the needed files from the server | |
541 | repo.fileservice.prefetch(files) |
|
542 | repo.fileservice.prefetch(files) | |
542 | return orig(repo, matcher, added, removed, *args, **kwargs) |
|
543 | return orig(repo, matcher, added, removed, *args, **kwargs) | |
543 |
|
544 | |||
544 |
|
545 | |||
545 | # prefetch files before pathcopies check |
|
546 | # prefetch files before pathcopies check | |
546 | def computeforwardmissing(orig, a, b, match=None): |
|
547 | def computeforwardmissing(orig, a, b, match=None): | |
547 | missing = orig(a, b, match=match) |
|
548 | missing = orig(a, b, match=match) | |
548 | repo = a._repo |
|
549 | repo = a._repo | |
549 | if isenabled(repo): |
|
550 | if isenabled(repo): | |
550 | mb = b.manifest() |
|
551 | mb = b.manifest() | |
551 |
|
552 | |||
552 | files = [] |
|
553 | files = [] | |
553 | sparsematch = repo.maybesparsematch(b.rev()) |
|
554 | sparsematch = repo.maybesparsematch(b.rev()) | |
554 | if sparsematch: |
|
555 | if sparsematch: | |
555 | sparsemissing = set() |
|
556 | sparsemissing = set() | |
556 | for f in missing: |
|
557 | for f in missing: | |
557 | if sparsematch(f): |
|
558 | if sparsematch(f): | |
558 | files.append((f, hex(mb[f]))) |
|
559 | files.append((f, hex(mb[f]))) | |
559 | sparsemissing.add(f) |
|
560 | sparsemissing.add(f) | |
560 | missing = sparsemissing |
|
561 | missing = sparsemissing | |
561 |
|
562 | |||
562 | # batch fetch the needed files from the server |
|
563 | # batch fetch the needed files from the server | |
563 | repo.fileservice.prefetch(files) |
|
564 | repo.fileservice.prefetch(files) | |
564 | return missing |
|
565 | return missing | |
565 |
|
566 | |||
566 |
|
567 | |||
567 | # close cache miss server connection after the command has finished |
|
568 | # close cache miss server connection after the command has finished | |
568 | def runcommand(orig, lui, repo, *args, **kwargs): |
|
569 | def runcommand(orig, lui, repo, *args, **kwargs): | |
569 | fileservice = None |
|
570 | fileservice = None | |
570 | # repo can be None when running in chg: |
|
571 | # repo can be None when running in chg: | |
571 | # - at startup, reposetup was called because serve is not norepo |
|
572 | # - at startup, reposetup was called because serve is not norepo | |
572 | # - a norepo command like "help" is called |
|
573 | # - a norepo command like "help" is called | |
573 | if repo and isenabled(repo): |
|
574 | if repo and isenabled(repo): | |
574 | fileservice = repo.fileservice |
|
575 | fileservice = repo.fileservice | |
575 | try: |
|
576 | try: | |
576 | return orig(lui, repo, *args, **kwargs) |
|
577 | return orig(lui, repo, *args, **kwargs) | |
577 | finally: |
|
578 | finally: | |
578 | if fileservice: |
|
579 | if fileservice: | |
579 | fileservice.close() |
|
580 | fileservice.close() | |
580 |
|
581 | |||
581 |
|
582 | |||
582 | # prevent strip from stripping remotefilelogs |
|
583 | # prevent strip from stripping remotefilelogs | |
583 | def _collectbrokencsets(orig, repo, files, striprev): |
|
584 | def _collectbrokencsets(orig, repo, files, striprev): | |
584 | if isenabled(repo): |
|
585 | if isenabled(repo): | |
585 | files = list([f for f in files if not repo.shallowmatch(f)]) |
|
586 | files = list([f for f in files if not repo.shallowmatch(f)]) | |
586 | return orig(repo, files, striprev) |
|
587 | return orig(repo, files, striprev) | |
587 |
|
588 | |||
588 |
|
589 | |||
589 | # changectx wrappers |
|
590 | # changectx wrappers | |
590 | def filectx(orig, self, path, fileid=None, filelog=None): |
|
591 | def filectx(orig, self, path, fileid=None, filelog=None): | |
591 | if fileid is None: |
|
592 | if fileid is None: | |
592 | fileid = self.filenode(path) |
|
593 | fileid = self.filenode(path) | |
593 | if isenabled(self._repo) and self._repo.shallowmatch(path): |
|
594 | if isenabled(self._repo) and self._repo.shallowmatch(path): | |
594 | return remotefilectx.remotefilectx( |
|
595 | return remotefilectx.remotefilectx( | |
595 | self._repo, path, fileid=fileid, changectx=self, filelog=filelog |
|
596 | self._repo, path, fileid=fileid, changectx=self, filelog=filelog | |
596 | ) |
|
597 | ) | |
597 | return orig(self, path, fileid=fileid, filelog=filelog) |
|
598 | return orig(self, path, fileid=fileid, filelog=filelog) | |
598 |
|
599 | |||
599 |
|
600 | |||
600 | def workingfilectx(orig, self, path, filelog=None): |
|
601 | def workingfilectx(orig, self, path, filelog=None): | |
601 | if isenabled(self._repo) and self._repo.shallowmatch(path): |
|
602 | if isenabled(self._repo) and self._repo.shallowmatch(path): | |
602 | return remotefilectx.remoteworkingfilectx( |
|
603 | return remotefilectx.remoteworkingfilectx( | |
603 | self._repo, path, workingctx=self, filelog=filelog |
|
604 | self._repo, path, workingctx=self, filelog=filelog | |
604 | ) |
|
605 | ) | |
605 | return orig(self, path, filelog=filelog) |
|
606 | return orig(self, path, filelog=filelog) | |
606 |
|
607 | |||
607 |
|
608 | |||
608 | # prefetch required revisions before a diff |
|
609 | # prefetch required revisions before a diff | |
609 | def trydiff( |
|
610 | def trydiff( | |
610 | orig, |
|
611 | orig, | |
611 | repo, |
|
612 | repo, | |
612 | revs, |
|
613 | revs, | |
613 | ctx1, |
|
614 | ctx1, | |
614 | ctx2, |
|
615 | ctx2, | |
615 | modified, |
|
616 | modified, | |
616 | added, |
|
617 | added, | |
617 | removed, |
|
618 | removed, | |
618 | copy, |
|
619 | copy, | |
619 | getfilectx, |
|
620 | getfilectx, | |
620 | *args, |
|
621 | *args, | |
621 | **kwargs |
|
622 | **kwargs | |
622 | ): |
|
623 | ): | |
623 | if isenabled(repo): |
|
624 | if isenabled(repo): | |
624 | prefetch = [] |
|
625 | prefetch = [] | |
625 | mf1 = ctx1.manifest() |
|
626 | mf1 = ctx1.manifest() | |
626 | for fname in modified + added + removed: |
|
627 | for fname in modified + added + removed: | |
627 | if fname in mf1: |
|
628 | if fname in mf1: | |
628 | fnode = getfilectx(fname, ctx1).filenode() |
|
629 | fnode = getfilectx(fname, ctx1).filenode() | |
629 | # fnode can be None if it's a edited working ctx file |
|
630 | # fnode can be None if it's a edited working ctx file | |
630 | if fnode: |
|
631 | if fnode: | |
631 | prefetch.append((fname, hex(fnode))) |
|
632 | prefetch.append((fname, hex(fnode))) | |
632 | if fname not in removed: |
|
633 | if fname not in removed: | |
633 | fnode = getfilectx(fname, ctx2).filenode() |
|
634 | fnode = getfilectx(fname, ctx2).filenode() | |
634 | if fnode: |
|
635 | if fnode: | |
635 | prefetch.append((fname, hex(fnode))) |
|
636 | prefetch.append((fname, hex(fnode))) | |
636 |
|
637 | |||
637 | repo.fileservice.prefetch(prefetch) |
|
638 | repo.fileservice.prefetch(prefetch) | |
638 |
|
639 | |||
639 | return orig( |
|
640 | return orig( | |
640 | repo, |
|
641 | repo, | |
641 | revs, |
|
642 | revs, | |
642 | ctx1, |
|
643 | ctx1, | |
643 | ctx2, |
|
644 | ctx2, | |
644 | modified, |
|
645 | modified, | |
645 | added, |
|
646 | added, | |
646 | removed, |
|
647 | removed, | |
647 | copy, |
|
648 | copy, | |
648 | getfilectx, |
|
649 | getfilectx, | |
649 | *args, |
|
650 | *args, | |
650 | **kwargs |
|
651 | **kwargs | |
651 | ) |
|
652 | ) | |
652 |
|
653 | |||
653 |
|
654 | |||
654 | # Prevent verify from processing files |
|
655 | # Prevent verify from processing files | |
655 | # a stub for mercurial.hg.verify() |
|
656 | # a stub for mercurial.hg.verify() | |
656 | def _verify(orig, repo, level=None): |
|
657 | def _verify(orig, repo, level=None): | |
657 | lock = repo.lock() |
|
658 | lock = repo.lock() | |
658 | try: |
|
659 | try: | |
659 | return shallowverifier.shallowverifier(repo).verify() |
|
660 | return shallowverifier.shallowverifier(repo).verify() | |
660 | finally: |
|
661 | finally: | |
661 | lock.release() |
|
662 | lock.release() | |
662 |
|
663 | |||
663 |
|
664 | |||
664 | clientonetime = False |
|
665 | clientonetime = False | |
665 |
|
666 | |||
666 |
|
667 | |||
667 | def onetimeclientsetup(ui): |
|
668 | def onetimeclientsetup(ui): | |
668 | global clientonetime |
|
669 | global clientonetime | |
669 | if clientonetime: |
|
670 | if clientonetime: | |
670 | return |
|
671 | return | |
671 | clientonetime = True |
|
672 | clientonetime = True | |
672 |
|
673 | |||
673 | # Don't commit filelogs until we know the commit hash, since the hash |
|
674 | # Don't commit filelogs until we know the commit hash, since the hash | |
674 | # is present in the filelog blob. |
|
675 | # is present in the filelog blob. | |
675 | # This violates Mercurial's filelog->manifest->changelog write order, |
|
676 | # This violates Mercurial's filelog->manifest->changelog write order, | |
676 | # but is generally fine for client repos. |
|
677 | # but is generally fine for client repos. | |
677 | pendingfilecommits = [] |
|
678 | pendingfilecommits = [] | |
678 |
|
679 | |||
679 | def addrawrevision( |
|
680 | def addrawrevision( | |
680 | orig, |
|
681 | orig, | |
681 | self, |
|
682 | self, | |
682 | rawtext, |
|
683 | rawtext, | |
683 | transaction, |
|
684 | transaction, | |
684 | link, |
|
685 | link, | |
685 | p1, |
|
686 | p1, | |
686 | p2, |
|
687 | p2, | |
687 | node, |
|
688 | node, | |
688 | flags, |
|
689 | flags, | |
689 | cachedelta=None, |
|
690 | cachedelta=None, | |
690 | _metatuple=None, |
|
691 | _metatuple=None, | |
691 | ): |
|
692 | ): | |
692 | if isinstance(link, int): |
|
693 | if isinstance(link, int): | |
693 | pendingfilecommits.append( |
|
694 | pendingfilecommits.append( | |
694 | ( |
|
695 | ( | |
695 | self, |
|
696 | self, | |
696 | rawtext, |
|
697 | rawtext, | |
697 | transaction, |
|
698 | transaction, | |
698 | link, |
|
699 | link, | |
699 | p1, |
|
700 | p1, | |
700 | p2, |
|
701 | p2, | |
701 | node, |
|
702 | node, | |
702 | flags, |
|
703 | flags, | |
703 | cachedelta, |
|
704 | cachedelta, | |
704 | _metatuple, |
|
705 | _metatuple, | |
705 | ) |
|
706 | ) | |
706 | ) |
|
707 | ) | |
707 | return node |
|
708 | return node | |
708 | else: |
|
709 | else: | |
709 | return orig( |
|
710 | return orig( | |
710 | self, |
|
711 | self, | |
711 | rawtext, |
|
712 | rawtext, | |
712 | transaction, |
|
713 | transaction, | |
713 | link, |
|
714 | link, | |
714 | p1, |
|
715 | p1, | |
715 | p2, |
|
716 | p2, | |
716 | node, |
|
717 | node, | |
717 | flags, |
|
718 | flags, | |
718 | cachedelta, |
|
719 | cachedelta, | |
719 | _metatuple=_metatuple, |
|
720 | _metatuple=_metatuple, | |
720 | ) |
|
721 | ) | |
721 |
|
722 | |||
722 | extensions.wrapfunction( |
|
723 | extensions.wrapfunction( | |
723 | remotefilelog.remotefilelog, b'addrawrevision', addrawrevision |
|
724 | remotefilelog.remotefilelog, b'addrawrevision', addrawrevision | |
724 | ) |
|
725 | ) | |
725 |
|
726 | |||
726 | def changelogadd(orig, self, *args): |
|
727 | def changelogadd(orig, self, *args): | |
727 | oldlen = len(self) |
|
728 | oldlen = len(self) | |
728 | node = orig(self, *args) |
|
729 | node = orig(self, *args) | |
729 | newlen = len(self) |
|
730 | newlen = len(self) | |
730 | if oldlen != newlen: |
|
731 | if oldlen != newlen: | |
731 | for oldargs in pendingfilecommits: |
|
732 | for oldargs in pendingfilecommits: | |
732 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs |
|
733 | log, rt, tr, link, p1, p2, n, fl, c, m = oldargs | |
733 | linknode = self.node(link) |
|
734 | linknode = self.node(link) | |
734 | if linknode == node: |
|
735 | if linknode == node: | |
735 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) |
|
736 | log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m) | |
736 | else: |
|
737 | else: | |
737 | raise error.ProgrammingError( |
|
738 | raise error.ProgrammingError( | |
738 | b'pending multiple integer revisions are not supported' |
|
739 | b'pending multiple integer revisions are not supported' | |
739 | ) |
|
740 | ) | |
740 | else: |
|
741 | else: | |
741 | # "link" is actually wrong here (it is set to len(changelog)) |
|
742 | # "link" is actually wrong here (it is set to len(changelog)) | |
742 | # if changelog remains unchanged, skip writing file revisions |
|
743 | # if changelog remains unchanged, skip writing file revisions | |
743 | # but still do a sanity check about pending multiple revisions |
|
744 | # but still do a sanity check about pending multiple revisions | |
744 | if len(set(x[3] for x in pendingfilecommits)) > 1: |
|
745 | if len(set(x[3] for x in pendingfilecommits)) > 1: | |
745 | raise error.ProgrammingError( |
|
746 | raise error.ProgrammingError( | |
746 | b'pending multiple integer revisions are not supported' |
|
747 | b'pending multiple integer revisions are not supported' | |
747 | ) |
|
748 | ) | |
748 | del pendingfilecommits[:] |
|
749 | del pendingfilecommits[:] | |
749 | return node |
|
750 | return node | |
750 |
|
751 | |||
751 | extensions.wrapfunction(changelog.changelog, b'add', changelogadd) |
|
752 | extensions.wrapfunction(changelog.changelog, b'add', changelogadd) | |
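# Illustrative sketch, not part of the diff above: both wrappers registered
# here follow Mercurial's extensions.wrapfunction() convention, where the
# replacement receives the original callable as its first argument and is
# expected to delegate to it.  A minimal, dependency-free stand-in for that
# pattern (all names below are hypothetical, not Mercurial APIs):
#
#     def wrapfunction(container, name, wrapper):
#         # simplified stand-in for mercurial.extensions.wrapfunction
#         origfn = getattr(container, name)
#
#         def wrapped(*args, **kwargs):
#             return wrapper(origfn, *args, **kwargs)
#
#         setattr(container, name, wrapped)
#
#     class FakeChangelog(object):
#         def add(self, text):
#             return len(text)
#
#     def countingadd(orig, self, text):
#         # extra bookkeeping could happen here, mirroring changelogadd() above
#         return orig(self, text)
#
#     wrapfunction(FakeChangelog, 'add', countingadd)
#     assert FakeChangelog().add('abc') == 3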
752 |
|
753 | |||
753 |
|
754 | |||
754 | def getrenamedfn(orig, repo, endrev=None): |
|
755 | def getrenamedfn(orig, repo, endrev=None): | |
755 | if not isenabled(repo) or copies.usechangesetcentricalgo(repo): |
|
756 | if not isenabled(repo) or copies.usechangesetcentricalgo(repo): | |
756 | return orig(repo, endrev) |
|
757 | return orig(repo, endrev) | |
757 |
|
758 | |||
758 | rcache = {} |
|
759 | rcache = {} | |
759 |
|
760 | |||
760 | def getrenamed(fn, rev): |
|
761 | def getrenamed(fn, rev): | |
761 | '''looks up all renames for a file (up to endrev) the first |
|
762 | '''looks up all renames for a file (up to endrev) the first | |
762 | time the file is given. It indexes on the changerev and only |
|
763 | time the file is given. It indexes on the changerev and only | |
763 | parses the manifest if linkrev != changerev. |
|
764 | parses the manifest if linkrev != changerev. | |
764 | Returns rename info for fn at changerev rev.''' |
|
765 | Returns rename info for fn at changerev rev.''' | |
765 | if rev in rcache.setdefault(fn, {}): |
|
766 | if rev in rcache.setdefault(fn, {}): | |
766 | return rcache[fn][rev] |
|
767 | return rcache[fn][rev] | |
767 |
|
768 | |||
768 | try: |
|
769 | try: | |
769 | fctx = repo[rev].filectx(fn) |
|
770 | fctx = repo[rev].filectx(fn) | |
770 | for ancestor in fctx.ancestors(): |
|
771 | for ancestor in fctx.ancestors(): | |
771 | if ancestor.path() == fn: |
|
772 | if ancestor.path() == fn: | |
772 | renamed = ancestor.renamed() |
|
773 | renamed = ancestor.renamed() | |
773 | rcache[fn][ancestor.rev()] = renamed and renamed[0] |
|
774 | rcache[fn][ancestor.rev()] = renamed and renamed[0] | |
774 |
|
775 | |||
775 | renamed = fctx.renamed() |
|
776 | renamed = fctx.renamed() | |
776 | return renamed and renamed[0] |
|
777 | return renamed and renamed[0] | |
777 | except error.LookupError: |
|
778 | except error.LookupError: | |
778 | return None |
|
779 | return None | |
779 |
|
780 | |||
780 | return getrenamed |
|
781 | return getrenamed | |
781 |
|
782 | |||
782 |
|
783 | |||
783 | def walkfilerevs(orig, repo, match, follow, revs, fncache): |
|
784 | def walkfilerevs(orig, repo, match, follow, revs, fncache): | |
784 | if not isenabled(repo): |
|
785 | if not isenabled(repo): | |
785 | return orig(repo, match, follow, revs, fncache) |
|
786 | return orig(repo, match, follow, revs, fncache) | |
786 |
|
787 | |||
787 | # remotefilelogs can't be walked in rev order, so throw. |
|
788 | # remotefilelogs can't be walked in rev order, so throw. | |
788 | # The caller will see the exception and walk the commit tree instead. |
|
789 | # The caller will see the exception and walk the commit tree instead. | |
789 | if not follow: |
|
790 | if not follow: | |
790 | raise cmdutil.FileWalkError(b"Cannot walk via filelog") |
|
791 | raise cmdutil.FileWalkError(b"Cannot walk via filelog") | |
791 |
|
792 | |||
792 | wanted = set() |
|
793 | wanted = set() | |
793 | minrev, maxrev = min(revs), max(revs) |
|
794 | minrev, maxrev = min(revs), max(revs) | |
794 |
|
795 | |||
795 | pctx = repo[b'.'] |
|
796 | pctx = repo[b'.'] | |
796 | for filename in match.files(): |
|
797 | for filename in match.files(): | |
797 | if filename not in pctx: |
|
798 | if filename not in pctx: | |
798 | raise error.Abort( |
|
799 | raise error.Abort( | |
799 | _(b'cannot follow file not in parent revision: "%s"') % filename |
|
800 | _(b'cannot follow file not in parent revision: "%s"') % filename | |
800 | ) |
|
801 | ) | |
801 | fctx = pctx[filename] |
|
802 | fctx = pctx[filename] | |
802 |
|
803 | |||
803 | linkrev = fctx.linkrev() |
|
804 | linkrev = fctx.linkrev() | |
804 | if linkrev >= minrev and linkrev <= maxrev: |
|
805 | if linkrev >= minrev and linkrev <= maxrev: | |
805 | fncache.setdefault(linkrev, []).append(filename) |
|
806 | fncache.setdefault(linkrev, []).append(filename) | |
806 | wanted.add(linkrev) |
|
807 | wanted.add(linkrev) | |
807 |
|
808 | |||
808 | for ancestor in fctx.ancestors(): |
|
809 | for ancestor in fctx.ancestors(): | |
809 | linkrev = ancestor.linkrev() |
|
810 | linkrev = ancestor.linkrev() | |
810 | if linkrev >= minrev and linkrev <= maxrev: |
|
811 | if linkrev >= minrev and linkrev <= maxrev: | |
811 | fncache.setdefault(linkrev, []).append(ancestor.path()) |
|
812 | fncache.setdefault(linkrev, []).append(ancestor.path()) | |
812 | wanted.add(linkrev) |
|
813 | wanted.add(linkrev) | |
813 |
|
814 | |||
814 | return wanted |
|
815 | return wanted | |
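# Illustrative sketch, not part of the diff above: walkfilerevs() deliberately
# raises cmdutil.FileWalkError for non-follow walks, since remotefilelog file
# histories cannot be enumerated in rev order.  The except-branch body below is
# hypothetical, not Mercurial's actual fallback code:
#
#     try:
#         wanted = cmdutil.walkfilerevs(repo, match, follow, revs, fncache)
#     except cmdutil.FileWalkError:
#         # fall back to walking the changelog and matching changed files
#         wanted = set(
#             rev for rev in revs
#             if any(f in repo[rev].files() for f in match.files())
#         )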
815 |
|
816 | |||
816 |
|
817 | |||
817 | def filelogrevset(orig, repo, subset, x): |
|
818 | def filelogrevset(orig, repo, subset, x): | |
818 | """``filelog(pattern)`` |
|
819 | """``filelog(pattern)`` | |
819 | Changesets connected to the specified filelog. |
|
820 | Changesets connected to the specified filelog. | |
820 |
|
821 | |||
821 | For performance reasons, ``filelog()`` does not show every changeset |
|
822 | For performance reasons, ``filelog()`` does not show every changeset | |
822 | that affects the requested file(s). See :hg:`help log` for details. For |
|
823 | that affects the requested file(s). See :hg:`help log` for details. For | |
823 | a slower, more accurate result, use ``file()``. |
|
824 | a slower, more accurate result, use ``file()``. | |
824 | """ |
|
825 | """ | |
825 |
|
826 | |||
826 | if not isenabled(repo): |
|
827 | if not isenabled(repo): | |
827 | return orig(repo, subset, x) |
|
828 | return orig(repo, subset, x) | |
828 |
|
829 | |||
829 | # i18n: "filelog" is a keyword |
|
830 | # i18n: "filelog" is a keyword | |
830 | pat = revset.getstring(x, _(b"filelog requires a pattern")) |
|
831 | pat = revset.getstring(x, _(b"filelog requires a pattern")) | |
831 | m = match.match( |
|
832 | m = match.match( | |
832 | repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None] |
|
833 | repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None] | |
833 | ) |
|
834 | ) | |
834 | s = set() |
|
835 | s = set() | |
835 |
|
836 | |||
836 | if not match.patkind(pat): |
|
837 | if not match.patkind(pat): | |
837 | # slow |
|
838 | # slow | |
838 | for r in subset: |
|
839 | for r in subset: | |
839 | ctx = repo[r] |
|
840 | ctx = repo[r] | |
840 | cfiles = ctx.files() |
|
841 | cfiles = ctx.files() | |
841 | for f in m.files(): |
|
842 | for f in m.files(): | |
842 | if f in cfiles: |
|
843 | if f in cfiles: | |
843 | s.add(ctx.rev()) |
|
844 | s.add(ctx.rev()) | |
844 | break |
|
845 | break | |
845 | else: |
|
846 | else: | |
846 | # partial |
|
847 | # partial | |
847 | files = (f for f in repo[None] if m(f)) |
|
848 | files = (f for f in repo[None] if m(f)) | |
848 | for f in files: |
|
849 | for f in files: | |
849 | fctx = repo[None].filectx(f) |
|
850 | fctx = repo[None].filectx(f) | |
850 | s.add(fctx.linkrev()) |
|
851 | s.add(fctx.linkrev()) | |
851 | for actx in fctx.ancestors(): |
|
852 | for actx in fctx.ancestors(): | |
852 | s.add(actx.linkrev()) |
|
853 | s.add(actx.linkrev()) | |
853 |
|
854 | |||
854 | return smartset.baseset([r for r in subset if r in s]) |
|
855 | return smartset.baseset([r for r in subset if r in s]) | |
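# Illustrative sketch, not part of the diff above: example use of the revset
# predicate defined here (the file pattern is only an example):
#
#     hg log -r 'filelog("path:mercurial/commands.py")'
#
# With remotefilelog enabled this resolves changesets via linkrevs of the file
# and its ancestors rather than by reading a local filelog revlog.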
855 |
|
856 | |||
856 |
|
857 | |||
857 | @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True) |
|
858 | @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True) | |
858 | def gc(ui, *args, **opts): |
|
859 | def gc(ui, *args, **opts): | |
859 | '''garbage collect the client and server filelog caches |
|
860 | '''garbage collect the client and server filelog caches | |
860 | ''' |
|
861 | ''' | |
861 | cachepaths = set() |
|
862 | cachepaths = set() | |
862 |
|
863 | |||
863 | # get the system client cache |
|
864 | # get the system client cache | |
864 | systemcache = shallowutil.getcachepath(ui, allowempty=True) |
|
865 | systemcache = shallowutil.getcachepath(ui, allowempty=True) | |
865 | if systemcache: |
|
866 | if systemcache: | |
866 | cachepaths.add(systemcache) |
|
867 | cachepaths.add(systemcache) | |
867 |
|
868 | |||
868 | # get repo client and server cache |
|
869 | # get repo client and server cache | |
869 | repopaths = [] |
|
870 | repopaths = [] | |
870 | pwd = ui.environ.get(b'PWD') |
|
871 | pwd = ui.environ.get(b'PWD') | |
871 | if pwd: |
|
872 | if pwd: | |
872 | repopaths.append(pwd) |
|
873 | repopaths.append(pwd) | |
873 |
|
874 | |||
874 | repopaths.extend(args) |
|
875 | repopaths.extend(args) | |
875 | repos = [] |
|
876 | repos = [] | |
876 | for repopath in repopaths: |
|
877 | for repopath in repopaths: | |
877 | try: |
|
878 | try: | |
878 | repo = hg.peer(ui, {}, repopath) |
|
879 | repo = hg.peer(ui, {}, repopath) | |
879 | repos.append(repo) |
|
880 | repos.append(repo) | |
880 |
|
881 | |||
881 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) |
|
882 | repocache = shallowutil.getcachepath(repo.ui, allowempty=True) | |
882 | if repocache: |
|
883 | if repocache: | |
883 | cachepaths.add(repocache) |
|
884 | cachepaths.add(repocache) | |
884 | except error.RepoError: |
|
885 | except error.RepoError: | |
885 | pass |
|
886 | pass | |
886 |
|
887 | |||
887 | # gc client cache |
|
888 | # gc client cache | |
888 | for cachepath in cachepaths: |
|
889 | for cachepath in cachepaths: | |
889 | gcclient(ui, cachepath) |
|
890 | gcclient(ui, cachepath) | |
890 |
|
891 | |||
891 | # gc server cache |
|
892 | # gc server cache | |
892 | for repo in repos: |
|
893 | for repo in repos: | |
893 | remotefilelogserver.gcserver(ui, repo._repo) |
|
894 | remotefilelogserver.gcserver(ui, repo._repo) | |
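# Illustrative sketch, not part of the diff above: example invocations of the
# 'hg gc' command defined here (paths are illustrative):
#
#     hg gc                                  # caches for the repo in $PWD
#     hg gc /path/to/repo1 /path/to/repo2    # caches for specific repos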
894 |
|
895 | |||
895 |
|
896 | |||
896 | def gcclient(ui, cachepath): |
|
897 | def gcclient(ui, cachepath): | |
897 | # get list of repos that use this cache |
|
898 | # get list of repos that use this cache | |
898 | repospath = os.path.join(cachepath, b'repos') |
|
899 | repospath = os.path.join(cachepath, b'repos') | |
899 | if not os.path.exists(repospath): |
|
900 | if not os.path.exists(repospath): | |
900 | ui.warn(_(b"no known cache at %s\n") % cachepath) |
|
901 | ui.warn(_(b"no known cache at %s\n") % cachepath) | |
901 | return |
|
902 | return | |
902 |
|
903 | |||
903 | reposfile = open(repospath, b'rb') |
|
904 | reposfile = open(repospath, b'rb') | |
904 | repos = {r[:-1] for r in reposfile.readlines()} |
|
905 | repos = {r[:-1] for r in reposfile.readlines()} | |
905 | reposfile.close() |
|
906 | reposfile.close() | |
906 |
|
907 | |||
907 | # build list of useful files |
|
908 | # build list of useful files | |
908 | validrepos = [] |
|
909 | validrepos = [] | |
909 | keepkeys = set() |
|
910 | keepkeys = set() | |
910 |
|
911 | |||
911 | sharedcache = None |
|
912 | sharedcache = None | |
912 | filesrepacked = False |
|
913 | filesrepacked = False | |
913 |
|
914 | |||
914 | count = 0 |
|
915 | count = 0 | |
915 | progress = ui.makeprogress( |
|
916 | progress = ui.makeprogress( | |
916 | _(b"analyzing repositories"), unit=b"repos", total=len(repos) |
|
917 | _(b"analyzing repositories"), unit=b"repos", total=len(repos) | |
917 | ) |
|
918 | ) | |
918 | for path in repos: |
|
919 | for path in repos: | |
919 | progress.update(count) |
|
920 | progress.update(count) | |
920 | count += 1 |
|
921 | count += 1 | |
921 | try: |
|
922 | try: | |
922 | path = ui.expandpath(os.path.normpath(path)) |
|
923 | path = ui.expandpath(os.path.normpath(path)) | |
923 | except TypeError as e: |
|
924 | except TypeError as e: | |
924 | ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e)) |
|
925 | ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e)) | |
925 | traceback.print_exc() |
|
926 | traceback.print_exc() | |
926 | continue |
|
927 | continue | |
927 | try: |
|
928 | try: | |
928 | peer = hg.peer(ui, {}, path) |
|
929 | peer = hg.peer(ui, {}, path) | |
929 | repo = peer._repo |
|
930 | repo = peer._repo | |
930 | except error.RepoError: |
|
931 | except error.RepoError: | |
931 | continue |
|
932 | continue | |
932 |
|
933 | |||
933 | validrepos.append(path) |
|
934 | validrepos.append(path) | |
934 |
|
935 | |||
935 | # Protect against any repo or config changes that have happened since |
|
936 | # Protect against any repo or config changes that have happened since | |
936 | # this repo was added to the repos file. We'd rather this loop succeed |
|
937 | # this repo was added to the repos file. We'd rather this loop succeed | |
937 | # and too much be deleted, than the loop fail and nothing gets deleted. |
|
938 | # and too much be deleted, than the loop fail and nothing gets deleted. | |
938 | if not isenabled(repo): |
|
939 | if not isenabled(repo): | |
939 | continue |
|
940 | continue | |
940 |
|
941 | |||
941 | if not util.safehasattr(repo, b'name'): |
|
942 | if not util.safehasattr(repo, b'name'): | |
942 | ui.warn( |
|
943 | ui.warn( | |
943 | _(b"repo %s is a misconfigured remotefilelog repo\n") % path |
|
944 | _(b"repo %s is a misconfigured remotefilelog repo\n") % path | |
944 | ) |
|
945 | ) | |
945 | continue |
|
946 | continue | |
946 |
|
947 | |||
947 | # If garbage collection on repack and repack on hg gc are enabled |
|
948 | # If garbage collection on repack and repack on hg gc are enabled | |
948 | # then loose files are repacked and garbage collected. |
|
949 | # then loose files are repacked and garbage collected. | |
949 | # Otherwise regular garbage collection is performed. |
|
950 | # Otherwise regular garbage collection is performed. | |
950 | repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc') |
|
951 | repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc') | |
951 | gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack') |
|
952 | gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack') | |
952 | if repackonhggc and gcrepack: |
|
953 | if repackonhggc and gcrepack: | |
953 | try: |
|
954 | try: | |
954 | repackmod.incrementalrepack(repo) |
|
955 | repackmod.incrementalrepack(repo) | |
955 | filesrepacked = True |
|
956 | filesrepacked = True | |
956 | continue |
|
957 | continue | |
957 | except (IOError, repackmod.RepackAlreadyRunning): |
|
958 | except (IOError, repackmod.RepackAlreadyRunning): | |
958 | # If repack cannot be performed due to not enough disk space |
|
959 | # If repack cannot be performed due to not enough disk space | |
959 | # continue doing garbage collection of loose files w/o repack |
|
960 | # continue doing garbage collection of loose files w/o repack | |
960 | pass |
|
961 | pass | |
961 |
|
962 | |||
962 | reponame = repo.name |
|
963 | reponame = repo.name | |
963 | if not sharedcache: |
|
964 | if not sharedcache: | |
964 | sharedcache = repo.sharedstore |
|
965 | sharedcache = repo.sharedstore | |
965 |
|
966 | |||
966 | # Compute a keepset which is not garbage collected |
|
967 | # Compute a keepset which is not garbage collected | |
967 | def keyfn(fname, fnode): |
|
968 | def keyfn(fname, fnode): | |
968 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) |
|
969 | return fileserverclient.getcachekey(reponame, fname, hex(fnode)) | |
969 |
|
970 | |||
970 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) |
|
971 | keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys) | |
971 |
|
972 | |||
972 | progress.complete() |
|
973 | progress.complete() | |
973 |
|
974 | |||
974 | # write list of valid repos back |
|
975 | # write list of valid repos back | |
975 | oldumask = os.umask(0o002) |
|
976 | oldumask = os.umask(0o002) | |
976 | try: |
|
977 | try: | |
977 | reposfile = open(repospath, b'wb') |
|
978 | reposfile = open(repospath, b'wb') | |
978 | reposfile.writelines([(b"%s\n" % r) for r in validrepos]) |
|
979 | reposfile.writelines([(b"%s\n" % r) for r in validrepos]) | |
979 | reposfile.close() |
|
980 | reposfile.close() | |
980 | finally: |
|
981 | finally: | |
981 | os.umask(oldumask) |
|
982 | os.umask(oldumask) | |
982 |
|
983 | |||
983 | # prune cache |
|
984 | # prune cache | |
984 | if sharedcache is not None: |
|
985 | if sharedcache is not None: | |
985 | sharedcache.gc(keepkeys) |
|
986 | sharedcache.gc(keepkeys) | |
986 | elif not filesrepacked: |
|
987 | elif not filesrepacked: | |
987 | ui.warn(_(b"warning: no valid repos in repofile\n")) |
|
988 | ui.warn(_(b"warning: no valid repos in repofile\n")) | |
988 |
|
989 | |||
989 |
|
990 | |||
990 | def log(orig, ui, repo, *pats, **opts): |
|
991 | def log(orig, ui, repo, *pats, **opts): | |
991 | if not isenabled(repo): |
|
992 | if not isenabled(repo): | |
992 | return orig(ui, repo, *pats, **opts) |
|
993 | return orig(ui, repo, *pats, **opts) | |
993 |
|
994 | |||
994 | follow = opts.get(r'follow') |
|
995 | follow = opts.get(r'follow') | |
995 | revs = opts.get(r'rev') |
|
996 | revs = opts.get(r'rev') | |
996 | if pats: |
|
997 | if pats: | |
997 | # Force slowpath for non-follow patterns and follows that start from |
|
998 | # Force slowpath for non-follow patterns and follows that start from | |
998 | # non-working-copy-parent revs. |
|
999 | # non-working-copy-parent revs. | |
999 | if not follow or revs: |
|
1000 | if not follow or revs: | |
1000 | # This forces the slowpath |
|
1001 | # This forces the slowpath | |
1001 | opts[r'removed'] = True |
|
1002 | opts[r'removed'] = True | |
1002 |
|
1003 | |||
1003 | # If this is a non-follow log without any revs specified, recommend that |
|
1004 | # If this is a non-follow log without any revs specified, recommend that | |
1004 | # the user add -f to speed it up. |
|
1005 | # the user add -f to speed it up. | |
1005 | if not follow and not revs: |
|
1006 | if not follow and not revs: | |
1006 | match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts)) |
|
1007 | match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts)) | |
1007 | isfile = not match.anypats() |
|
1008 | isfile = not match.anypats() | |
1008 | if isfile: |
|
1009 | if isfile: | |
1009 | for file in match.files(): |
|
1010 | for file in match.files(): | |
1010 | if not os.path.isfile(repo.wjoin(file)): |
|
1011 | if not os.path.isfile(repo.wjoin(file)): | |
1011 | isfile = False |
|
1012 | isfile = False | |
1012 | break |
|
1013 | break | |
1013 |
|
1014 | |||
1014 | if isfile: |
|
1015 | if isfile: | |
1015 | ui.warn( |
|
1016 | ui.warn( | |
1016 | _( |
|
1017 | _( | |
1017 | b"warning: file log can be slow on large repos - " |
|
1018 | b"warning: file log can be slow on large repos - " | |
1018 | + b"use -f to speed it up\n" |
|
1019 | + b"use -f to speed it up\n" | |
1019 | ) |
|
1020 | ) | |
1020 | ) |
|
1021 | ) | |
1021 |
|
1022 | |||
1022 | return orig(ui, repo, *pats, **opts) |
|
1023 | return orig(ui, repo, *pats, **opts) | |
1023 |
|
1024 | |||
1024 |
|
1025 | |||
1025 | def revdatelimit(ui, revset): |
|
1026 | def revdatelimit(ui, revset): | |
1026 | """Update revset so that only changesets no older than 'prefetchdays' days |
|
1027 | """Update revset so that only changesets no older than 'prefetchdays' days | |
1027 | are included. The default value is set to 14 days. If 'prefetchdays' is set |
|
1028 | are included. The default value is set to 14 days. If 'prefetchdays' is set | |
1028 | to a zero or negative value, the date restriction is not applied. |
|
1029 | to a zero or negative value, the date restriction is not applied. | |
1029 | """ |
|
1030 | """ | |
1030 | days = ui.configint(b'remotefilelog', b'prefetchdays') |
|
1031 | days = ui.configint(b'remotefilelog', b'prefetchdays') | |
1031 | if days > 0: |
|
1032 | if days > 0: | |
1032 | revset = b'(%s) & date(-%s)' % (revset, days) |
|
1033 | revset = b'(%s) & date(-%s)' % (revset, days) | |
1033 | return revset |
|
1034 | return revset | |
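# Illustrative sketch, not part of the diff above: what revdatelimit()
# produces.  With remotefilelog.prefetchdays = 14 (the stated default), an
# input revset of b'draft()' comes back as:
#
#     b'(draft()) & date(-14)'
#
# and is returned unchanged when prefetchdays is zero or negative.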
1034 |
|
1035 | |||
1035 |
|
1036 | |||
1036 | def readytofetch(repo): |
|
1037 | def readytofetch(repo): | |
1037 | """Check that enough time has passed since the last background prefetch. |
|
1038 | """Check that enough time has passed since the last background prefetch. | |
1038 | This only relates to prefetches after operations that change the working |
|
1039 | This only relates to prefetches after operations that change the working | |
1039 | copy parent. Default delay between background prefetches is 2 minutes. |
|
1040 | copy parent. Default delay between background prefetches is 2 minutes. | |
1040 | """ |
|
1041 | """ | |
1041 | timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay') |
|
1042 | timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay') | |
1042 | fname = repo.vfs.join(b'lastprefetch') |
|
1043 | fname = repo.vfs.join(b'lastprefetch') | |
1043 |
|
1044 | |||
1044 | ready = False |
|
1045 | ready = False | |
1045 | with open(fname, b'a'): |
|
1046 | with open(fname, b'a'): | |
1046 | # the with construct above is used to avoid race conditions |
|
1047 | # the with construct above is used to avoid race conditions | |
1047 | modtime = os.path.getmtime(fname) |
|
1048 | modtime = os.path.getmtime(fname) | |
1048 | if (time.time() - modtime) > timeout: |
|
1049 | if (time.time() - modtime) > timeout: | |
1049 | os.utime(fname, None) |
|
1050 | os.utime(fname, None) | |
1050 | ready = True |
|
1051 | ready = True | |
1051 |
|
1052 | |||
1052 | return ready |
|
1053 | return ready | |
1053 |
|
1054 | |||
1054 |
|
1055 | |||
1055 | def wcpprefetch(ui, repo, **kwargs): |
|
1056 | def wcpprefetch(ui, repo, **kwargs): | |
1056 | """Prefetches, in the background, revisions specified by the bgprefetchrevs revset. |
|
1057 | """Prefetches, in the background, revisions specified by the bgprefetchrevs revset. | |
1057 | Does background repack if backgroundrepack flag is set in config. |
|
1058 | Does background repack if backgroundrepack flag is set in config. | |
1058 | """ |
|
1059 | """ | |
1059 | shallow = isenabled(repo) |
|
1060 | shallow = isenabled(repo) | |
1060 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs') |
|
1061 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs') | |
1061 | isready = readytofetch(repo) |
|
1062 | isready = readytofetch(repo) | |
1062 |
|
1063 | |||
1063 | if not (shallow and bgprefetchrevs and isready): |
|
1064 | if not (shallow and bgprefetchrevs and isready): | |
1064 | return |
|
1065 | return | |
1065 |
|
1066 | |||
1066 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') |
|
1067 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') | |
1067 | # update a revset with a date limit |
|
1068 | # update a revset with a date limit | |
1068 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) |
|
1069 | bgprefetchrevs = revdatelimit(ui, bgprefetchrevs) | |
1069 |
|
1070 | |||
1070 | def anon(): |
|
1071 | def anon(): | |
1071 | if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch: |
|
1072 | if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch: | |
1072 | return |
|
1073 | return | |
1073 | repo.ranprefetch = True |
|
1074 | repo.ranprefetch = True | |
1074 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) |
|
1075 | repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack) | |
1075 |
|
1076 | |||
1076 | repo._afterlock(anon) |
|
1077 | repo._afterlock(anon) | |
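# Illustrative sketch, not part of the diff above: a minimal configuration
# that makes wcpprefetch() act after working-copy updates.  The revset value
# is only an example; 120 seconds matches the default prefetch delay described
# in readytofetch():
#
#     [remotefilelog]
#     bgprefetchrevs = .::
#     backgroundrepack = True
#     prefetchdelay = 120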
1077 |
|
1078 | |||
1078 |
|
1079 | |||
1079 | def pull(orig, ui, repo, *pats, **opts): |
|
1080 | def pull(orig, ui, repo, *pats, **opts): | |
1080 | result = orig(ui, repo, *pats, **opts) |
|
1081 | result = orig(ui, repo, *pats, **opts) | |
1081 |
|
1082 | |||
1082 | if isenabled(repo): |
|
1083 | if isenabled(repo): | |
1083 | # prefetch if it's configured |
|
1084 | # prefetch if it's configured | |
1084 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch') |
|
1085 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch') | |
1085 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') |
|
1086 | bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack') | |
1086 | bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch') |
|
1087 | bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch') | |
1087 | ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart') |
|
1088 | ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart') | |
1088 |
|
1089 | |||
1089 | if prefetchrevset: |
|
1090 | if prefetchrevset: | |
1090 | ui.status(_(b"prefetching file contents\n")) |
|
1091 | ui.status(_(b"prefetching file contents\n")) | |
1091 | revs = scmutil.revrange(repo, [prefetchrevset]) |
|
1092 | revs = scmutil.revrange(repo, [prefetchrevset]) | |
1092 | base = repo[b'.'].rev() |
|
1093 | base = repo[b'.'].rev() | |
1093 | if bgprefetch: |
|
1094 | if bgprefetch: | |
1094 | repo.backgroundprefetch( |
|
1095 | repo.backgroundprefetch( | |
1095 | prefetchrevset, repack=bgrepack, ensurestart=ensurestart |
|
1096 | prefetchrevset, repack=bgrepack, ensurestart=ensurestart | |
1096 | ) |
|
1097 | ) | |
1097 | else: |
|
1098 | else: | |
1098 | repo.prefetch(revs, base=base) |
|
1099 | repo.prefetch(revs, base=base) | |
1099 | if bgrepack: |
|
1100 | if bgrepack: | |
1100 | repackmod.backgroundrepack( |
|
1101 | repackmod.backgroundrepack( | |
1101 | repo, incremental=True, ensurestart=ensurestart |
|
1102 | repo, incremental=True, ensurestart=ensurestart | |
1102 | ) |
|
1103 | ) | |
1103 | elif bgrepack: |
|
1104 | elif bgrepack: | |
1104 | repackmod.backgroundrepack( |
|
1105 | repackmod.backgroundrepack( | |
1105 | repo, incremental=True, ensurestart=ensurestart |
|
1106 | repo, incremental=True, ensurestart=ensurestart | |
1106 | ) |
|
1107 | ) | |
1107 |
|
1108 | |||
1108 | return result |
|
1109 | return result | |
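# Illustrative sketch, not part of the diff above: configuration consulted by
# the pull wrapper here; the pullprefetch value is an example revset, not a
# required one:
#
#     [remotefilelog]
#     pullprefetch = draft() + .
#     backgroundprefetch = True
#     backgroundrepack = True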
1109 |
|
1110 | |||
1110 |
|
1111 | |||
1111 | def exchangepull(orig, repo, remote, *args, **kwargs): |
|
1112 | def exchangepull(orig, repo, remote, *args, **kwargs): | |
1112 | # Hook into the callstream/getbundle to insert bundle capabilities |
|
1113 | # Hook into the callstream/getbundle to insert bundle capabilities | |
1113 | # during a pull. |
|
1114 | # during a pull. | |
1114 | def localgetbundle( |
|
1115 | def localgetbundle( | |
1115 | orig, source, heads=None, common=None, bundlecaps=None, **kwargs |
|
1116 | orig, source, heads=None, common=None, bundlecaps=None, **kwargs | |
1116 | ): |
|
1117 | ): | |
1117 | if not bundlecaps: |
|
1118 | if not bundlecaps: | |
1118 | bundlecaps = set() |
|
1119 | bundlecaps = set() | |
1119 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) |
|
1120 | bundlecaps.add(constants.BUNDLE2_CAPABLITY) | |
1120 | return orig( |
|
1121 | return orig( | |
1121 | source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs |
|
1122 | source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs | |
1122 | ) |
|
1123 | ) | |
1123 |
|
1124 | |||
1124 | if util.safehasattr(remote, b'_callstream'): |
|
1125 | if util.safehasattr(remote, b'_callstream'): | |
1125 | remote._localrepo = repo |
|
1126 | remote._localrepo = repo | |
1126 | elif util.safehasattr(remote, b'getbundle'): |
|
1127 | elif util.safehasattr(remote, b'getbundle'): | |
1127 | extensions.wrapfunction(remote, b'getbundle', localgetbundle) |
|
1128 | extensions.wrapfunction(remote, b'getbundle', localgetbundle) | |
1128 |
|
1129 | |||
1129 | return orig(repo, remote, *args, **kwargs) |
|
1130 | return orig(repo, remote, *args, **kwargs) | |
1130 |
|
1131 | |||
1131 |
|
1132 | |||
1132 | def _fileprefetchhook(repo, revs, match): |
|
1133 | def _fileprefetchhook(repo, revs, match): | |
1133 | if isenabled(repo): |
|
1134 | if isenabled(repo): | |
1134 | allfiles = [] |
|
1135 | allfiles = [] | |
1135 | for rev in revs: |
|
1136 | for rev in revs: | |
1136 | if rev == nodemod.wdirrev or rev is None: |
|
1137 | if rev == nodemod.wdirrev or rev is None: | |
1137 | continue |
|
1138 | continue | |
1138 | ctx = repo[rev] |
|
1139 | ctx = repo[rev] | |
1139 | mf = ctx.manifest() |
|
1140 | mf = ctx.manifest() | |
1140 | sparsematch = repo.maybesparsematch(ctx.rev()) |
|
1141 | sparsematch = repo.maybesparsematch(ctx.rev()) | |
1141 | for path in ctx.walk(match): |
|
1142 | for path in ctx.walk(match): | |
1142 | if (not sparsematch or sparsematch(path)) and path in mf: |
|
1143 | if (not sparsematch or sparsematch(path)) and path in mf: | |
1143 | allfiles.append((path, hex(mf[path]))) |
|
1144 | allfiles.append((path, hex(mf[path]))) | |
1144 | repo.fileservice.prefetch(allfiles) |
|
1145 | repo.fileservice.prefetch(allfiles) | |
1145 |
|
1146 | |||
1146 |
|
1147 | |||
1147 | @command( |
|
1148 | @command( | |
1148 | b'debugremotefilelog', |
|
1149 | b'debugremotefilelog', | |
1149 | [(b'd', b'decompress', None, _(b'decompress the filelog first')),], |
|
1150 | [(b'd', b'decompress', None, _(b'decompress the filelog first')),], | |
1150 | _(b'hg debugremotefilelog <path>'), |
|
1151 | _(b'hg debugremotefilelog <path>'), | |
1151 | norepo=True, |
|
1152 | norepo=True, | |
1152 | ) |
|
1153 | ) | |
1153 | def debugremotefilelog(ui, path, **opts): |
|
1154 | def debugremotefilelog(ui, path, **opts): | |
1154 | return debugcommands.debugremotefilelog(ui, path, **opts) |
|
1155 | return debugcommands.debugremotefilelog(ui, path, **opts) | |
1155 |
|
1156 | |||
1156 |
|
1157 | |||
1157 | @command( |
|
1158 | @command( | |
1158 | b'verifyremotefilelog', |
|
1159 | b'verifyremotefilelog', | |
1159 | [(b'd', b'decompress', None, _(b'decompress the filelogs first')),], |
|
1160 | [(b'd', b'decompress', None, _(b'decompress the filelogs first')),], | |
1160 | _(b'hg verifyremotefilelogs <directory>'), |
|
1161 | _(b'hg verifyremotefilelogs <directory>'), | |
1161 | norepo=True, |
|
1162 | norepo=True, | |
1162 | ) |
|
1163 | ) | |
1163 | def verifyremotefilelog(ui, path, **opts): |
|
1164 | def verifyremotefilelog(ui, path, **opts): | |
1164 | return debugcommands.verifyremotefilelog(ui, path, **opts) |
|
1165 | return debugcommands.verifyremotefilelog(ui, path, **opts) | |
1165 |
|
1166 | |||
1166 |
|
1167 | |||
1167 | @command( |
|
1168 | @command( | |
1168 | b'debugdatapack', |
|
1169 | b'debugdatapack', | |
1169 | [ |
|
1170 | [ | |
1170 | (b'', b'long', None, _(b'print the long hashes')), |
|
1171 | (b'', b'long', None, _(b'print the long hashes')), | |
1171 | (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'), |
|
1172 | (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'), | |
1172 | ], |
|
1173 | ], | |
1173 | _(b'hg debugdatapack <paths>'), |
|
1174 | _(b'hg debugdatapack <paths>'), | |
1174 | norepo=True, |
|
1175 | norepo=True, | |
1175 | ) |
|
1176 | ) | |
1176 | def debugdatapack(ui, *paths, **opts): |
|
1177 | def debugdatapack(ui, *paths, **opts): | |
1177 | return debugcommands.debugdatapack(ui, *paths, **opts) |
|
1178 | return debugcommands.debugdatapack(ui, *paths, **opts) | |
1178 |
|
1179 | |||
1179 |
|
1180 | |||
1180 | @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True) |
|
1181 | @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True) | |
1181 | def debughistorypack(ui, path, **opts): |
|
1182 | def debughistorypack(ui, path, **opts): | |
1182 | return debugcommands.debughistorypack(ui, path) |
|
1183 | return debugcommands.debughistorypack(ui, path) | |
1183 |
|
1184 | |||
1184 |
|
1185 | |||
1185 | @command(b'debugkeepset', [], _(b'hg debugkeepset')) |
|
1186 | @command(b'debugkeepset', [], _(b'hg debugkeepset')) | |
1186 | def debugkeepset(ui, repo, **opts): |
|
1187 | def debugkeepset(ui, repo, **opts): | |
1187 | # The command is used to measure keepset computation time |
|
1188 | # The command is used to measure keepset computation time | |
1188 | def keyfn(fname, fnode): |
|
1189 | def keyfn(fname, fnode): | |
1189 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) |
|
1190 | return fileserverclient.getcachekey(repo.name, fname, hex(fnode)) | |
1190 |
|
1191 | |||
1191 | repackmod.keepset(repo, keyfn) |
|
1192 | repackmod.keepset(repo, keyfn) | |
1192 | return |
|
1193 | return | |
1193 |
|
1194 | |||
1194 |
|
1195 | |||
1195 | @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack')) |
|
1196 | @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack')) | |
1196 | def debugwaitonrepack(ui, repo, **opts): |
|
1197 | def debugwaitonrepack(ui, repo, **opts): | |
1197 | return debugcommands.debugwaitonrepack(repo) |
|
1198 | return debugcommands.debugwaitonrepack(repo) | |
1198 |
|
1199 | |||
1199 |
|
1200 | |||
1200 | @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch')) |
|
1201 | @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch')) | |
1201 | def debugwaitonprefetch(ui, repo, **opts): |
|
1202 | def debugwaitonprefetch(ui, repo, **opts): | |
1202 | return debugcommands.debugwaitonprefetch(repo) |
|
1203 | return debugcommands.debugwaitonprefetch(repo) | |
1203 |
|
1204 | |||
1204 |
|
1205 | |||
1205 | def resolveprefetchopts(ui, opts): |
|
1206 | def resolveprefetchopts(ui, opts): | |
1206 | if not opts.get(b'rev'): |
|
1207 | if not opts.get(b'rev'): | |
1207 | revset = [b'.', b'draft()'] |
|
1208 | revset = [b'.', b'draft()'] | |
1208 |
|
1209 | |||
1209 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None) |
|
1210 | prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None) | |
1210 | if prefetchrevset: |
|
1211 | if prefetchrevset: | |
1211 | revset.append(b'(%s)' % prefetchrevset) |
|
1212 | revset.append(b'(%s)' % prefetchrevset) | |
1212 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None) |
|
1213 | bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None) | |
1213 | if bgprefetchrevs: |
|
1214 | if bgprefetchrevs: | |
1214 | revset.append(b'(%s)' % bgprefetchrevs) |
|
1215 | revset.append(b'(%s)' % bgprefetchrevs) | |
1215 | revset = b'+'.join(revset) |
|
1216 | revset = b'+'.join(revset) | |
1216 |
|
1217 | |||
1217 | # update a revset with a date limit |
|
1218 | # update a revset with a date limit | |
1218 | revset = revdatelimit(ui, revset) |
|
1219 | revset = revdatelimit(ui, revset) | |
1219 |
|
1220 | |||
1220 | opts[b'rev'] = [revset] |
|
1221 | opts[b'rev'] = [revset] | |
1221 |
|
1222 | |||
1222 | if not opts.get(b'base'): |
|
1223 | if not opts.get(b'base'): | |
1223 | opts[b'base'] = None |
|
1224 | opts[b'base'] = None | |
1224 |
|
1225 | |||
1225 | return opts |
|
1226 | return opts | |
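# Illustrative sketch, not part of the diff above: the revset assembled by
# resolveprefetchopts() when no --rev is given.  Assuming pullprefetch and
# bgprefetchrevs are unset and prefetchdays is 14, opts[b'rev'] ends up as:
#
#     [b'(.+draft()) & date(-14)']
#
# i.e. the working-copy parent plus draft changesets, limited to recent ones.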
1226 |
|
1227 | |||
1227 |
|
1228 | |||
1228 | @command( |
|
1229 | @command( | |
1229 | b'prefetch', |
|
1230 | b'prefetch', | |
1230 | [ |
|
1231 | [ | |
1231 | (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')), |
|
1232 | (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')), | |
1232 | (b'', b'repack', False, _(b'run repack after prefetch')), |
|
1233 | (b'', b'repack', False, _(b'run repack after prefetch')), | |
1233 | (b'b', b'base', b'', _(b"rev that is assumed to already be local")), |
|
1234 | (b'b', b'base', b'', _(b"rev that is assumed to already be local")), | |
1234 | ] |
|
1235 | ] | |
1235 | + commands.walkopts, |
|
1236 | + commands.walkopts, | |
1236 | _(b'hg prefetch [OPTIONS] [FILE...]'), |
|
1237 | _(b'hg prefetch [OPTIONS] [FILE...]'), | |
1237 | helpcategory=command.CATEGORY_MAINTENANCE, |
|
1238 | helpcategory=command.CATEGORY_MAINTENANCE, | |
1238 | ) |
|
1239 | ) | |
1239 | def prefetch(ui, repo, *pats, **opts): |
|
1240 | def prefetch(ui, repo, *pats, **opts): | |
1240 | """prefetch file revisions from the server |
|
1241 | """prefetch file revisions from the server | |
1241 |
|
1242 | |||
1242 | Prefetches file revisions for the specified revs and stores them in the |
|
1243 | Prefetches file revisions for the specified revs and stores them in the | |
1243 | local remotefilelog cache. If no rev is specified, the default rev is |
|
1244 | local remotefilelog cache. If no rev is specified, the default rev is | |
1244 | used which is the union of dot, draft, pullprefetch and bgprefetchrevs. |
|
1245 | used which is the union of dot, draft, pullprefetch and bgprefetchrevs. | |
1245 | File names or patterns can be used to limit which files are downloaded. |
|
1246 | File names or patterns can be used to limit which files are downloaded. | |
1246 |
|
1247 | |||
1247 | Return 0 on success. |
|
1248 | Return 0 on success. | |
1248 | """ |
|
1249 | """ | |
1249 | opts = pycompat.byteskwargs(opts) |
|
1250 | opts = pycompat.byteskwargs(opts) | |
1250 | if not isenabled(repo): |
|
1251 | if not isenabled(repo): | |
1251 | raise error.Abort(_(b"repo is not shallow")) |
|
1252 | raise error.Abort(_(b"repo is not shallow")) | |
1252 |
|
1253 | |||
1253 | opts = resolveprefetchopts(ui, opts) |
|
1254 | opts = resolveprefetchopts(ui, opts) | |
1254 | revs = scmutil.revrange(repo, opts.get(b'rev')) |
|
1255 | revs = scmutil.revrange(repo, opts.get(b'rev')) | |
1255 | repo.prefetch(revs, opts.get(b'base'), pats, opts) |
|
1256 | repo.prefetch(revs, opts.get(b'base'), pats, opts) | |
1256 |
|
1257 | |||
1257 | ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart') |
|
1258 | ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart') | |
1258 |
|
1259 | |||
1259 | # Run repack in background |
|
1260 | # Run repack in background | |
1260 | if opts.get(b'repack'): |
|
1261 | if opts.get(b'repack'): | |
1261 | repackmod.backgroundrepack( |
|
1262 | repackmod.backgroundrepack( | |
1262 | repo, incremental=True, ensurestart=ensurestart |
|
1263 | repo, incremental=True, ensurestart=ensurestart | |
1263 | ) |
|
1264 | ) | |
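# Illustrative sketch, not part of the diff above: example invocations of
# 'hg prefetch' as defined here (revisions and file patterns are illustrative):
#
#     hg prefetch                          # default revs: ., draft(), etc.
#     hg prefetch -r 'draft()' --repack
#     hg prefetch -r tip 'glob:**.py'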
1264 |
|
1265 | |||
1265 |
|
1266 | |||
1266 | @command( |
|
1267 | @command( | |
1267 | b'repack', |
|
1268 | b'repack', | |
1268 | [ |
|
1269 | [ | |
1269 | (b'', b'background', None, _(b'run in a background process'), None), |
|
1270 | (b'', b'background', None, _(b'run in a background process'), None), | |
1270 | (b'', b'incremental', None, _(b'do an incremental repack'), None), |
|
1271 | (b'', b'incremental', None, _(b'do an incremental repack'), None), | |
1271 | ( |
|
1272 | ( | |
1272 | b'', |
|
1273 | b'', | |
1273 | b'packsonly', |
|
1274 | b'packsonly', | |
1274 | None, |
|
1275 | None, | |
1275 | _(b'only repack packs (skip loose objects)'), |
|
1276 | _(b'only repack packs (skip loose objects)'), | |
1276 | None, |
|
1277 | None, | |
1277 | ), |
|
1278 | ), | |
1278 | ], |
|
1279 | ], | |
1279 | _(b'hg repack [OPTIONS]'), |
|
1280 | _(b'hg repack [OPTIONS]'), | |
1280 | ) |
|
1281 | ) | |
1281 | def repack_(ui, repo, *pats, **opts): |
|
1282 | def repack_(ui, repo, *pats, **opts): | |
1282 | if opts.get(r'background'): |
|
1283 | if opts.get(r'background'): | |
1283 | ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart') |
|
1284 | ensurestart = repo.ui.configbool(b'devel', b'remotefilelog.ensurestart') | |
1284 | repackmod.backgroundrepack( |
|
1285 | repackmod.backgroundrepack( | |
1285 | repo, |
|
1286 | repo, | |
1286 | incremental=opts.get(r'incremental'), |
|
1287 | incremental=opts.get(r'incremental'), | |
1287 | packsonly=opts.get(r'packsonly', False), |
|
1288 | packsonly=opts.get(r'packsonly', False), | |
1288 | ensurestart=ensurestart, |
|
1289 | ensurestart=ensurestart, | |
1289 | ) |
|
1290 | ) | |
1290 | return |
|
1291 | return | |
1291 |
|
1292 | |||
1292 | options = {b'packsonly': opts.get(r'packsonly')} |
|
1293 | options = {b'packsonly': opts.get(r'packsonly')} | |
1293 |
|
1294 | |||
1294 | try: |
|
1295 | try: | |
1295 | if opts.get(r'incremental'): |
|
1296 | if opts.get(r'incremental'): | |
1296 | repackmod.incrementalrepack(repo, options=options) |
|
1297 | repackmod.incrementalrepack(repo, options=options) | |
1297 | else: |
|
1298 | else: | |
1298 | repackmod.fullrepack(repo, options=options) |
|
1299 | repackmod.fullrepack(repo, options=options) | |
1299 | except repackmod.RepackAlreadyRunning as ex: |
|
1300 | except repackmod.RepackAlreadyRunning as ex: | |
1300 | # Don't propagate the exception if the repack is already in |
|
1301 | # Don't propagate the exception if the repack is already in | |
1301 | # progress, since we want the command to exit 0. |
|
1302 | # progress, since we want the command to exit 0. | |
1302 | repo.ui.warn(b'%s\n' % ex) |
|
1303 | repo.ui.warn(b'%s\n' % ex) |
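# Illustrative sketch, not part of the diff above: example invocations of
# 'hg repack' as defined here:
#
#     hg repack                            # full repack in the foreground
#     hg repack --incremental --packsonly
#     hg repack --background --incremental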
@@ -1,912 +1,918 b'' | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | import os |
|
3 | import os | |
4 | import time |
|
4 | import time | |
5 |
|
5 | |||
6 | from mercurial.i18n import _ |
|
6 | from mercurial.i18n import _ | |
7 | from mercurial.node import ( |
|
7 | from mercurial.node import ( | |
8 | nullid, |
|
8 | nullid, | |
9 | short, |
|
9 | short, | |
10 | ) |
|
10 | ) | |
11 | from mercurial import ( |
|
11 | from mercurial import ( | |
12 | encoding, |
|
12 | encoding, | |
13 | error, |
|
13 | error, | |
14 | lock as lockmod, |
|
14 | lock as lockmod, | |
15 | mdiff, |
|
15 | mdiff, | |
16 | policy, |
|
16 | policy, | |
17 | pycompat, |
|
17 | pycompat, | |
18 | scmutil, |
|
18 | scmutil, | |
19 | util, |
|
19 | util, | |
20 | vfs, |
|
20 | vfs, | |
21 | ) |
|
21 | ) | |
22 | from mercurial.utils import procutil |
|
22 | from mercurial.utils import procutil | |
23 | from . import ( |
|
23 | from . import ( | |
24 | constants, |
|
24 | constants, | |
25 | contentstore, |
|
25 | contentstore, | |
26 | datapack, |
|
26 | datapack, | |
27 | historypack, |
|
27 | historypack, | |
28 | metadatastore, |
|
28 | metadatastore, | |
29 | shallowutil, |
|
29 | shallowutil, | |
30 | ) |
|
30 | ) | |
31 |
|
31 | |||
32 | osutil = policy.importmod(r'osutil') |
|
32 | osutil = policy.importmod(r'osutil') | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | class RepackAlreadyRunning(error.Abort): |
|
35 | class RepackAlreadyRunning(error.Abort): | |
36 | pass |
|
36 | pass | |
37 |
|
37 | |||
38 |
|
38 | |||
39 | def backgroundrepack( |
|
39 | def backgroundrepack( | |
40 | repo, incremental=True, packsonly=False, ensurestart=False |
|
40 | repo, incremental=True, packsonly=False, ensurestart=False | |
41 | ): |
|
41 | ): | |
42 | cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'repack'] |
|
42 | cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'repack'] | |
43 | msg = _(b"(running background repack)\n") |
|
43 | msg = _(b"(running background repack)\n") | |
44 | if incremental: |
|
44 | if incremental: | |
45 | cmd.append(b'--incremental') |
|
45 | cmd.append(b'--incremental') | |
46 | msg = _(b"(running background incremental repack)\n") |
|
46 | msg = _(b"(running background incremental repack)\n") | |
47 | if packsonly: |
|
47 | if packsonly: | |
48 | cmd.append(b'--packsonly') |
|
48 | cmd.append(b'--packsonly') | |
49 | repo.ui.warn(msg) |
|
49 | repo.ui.warn(msg) | |
50 | # We know this command will find a binary, so don't block on it starting. |
|
50 | # We know this command will find a binary, so don't block on it starting. | |
51 | procutil.runbgcommand(cmd, encoding.environ, ensurestart=ensurestart) |
|
51 | kwargs = {} | |
|
52 | if repo.ui.configbool(b'devel', b'remotefilelog.bg-wait'): | |||
|
53 | kwargs['record_wait'] = repo.ui.atexit | |||
|
54 | ||||
|
55 | procutil.runbgcommand( | |||
|
56 | cmd, encoding.environ, ensurestart=ensurestart, **kwargs | |||
|
57 | ) | |||
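# Illustrative note on the change above, not part of the diff itself: the new
# devel knob routes the spawned 'hg repack' child through runbgcommand's
# record_wait hook using repo.ui.atexit, so the parent process waits for the
# background repack before it exits (presumably to make test runs
# deterministic).  Sketch of the configuration; test-only usage is an
# assumption, not stated in the diff:
#
#     [devel]
#     remotefilelog.bg-wait = True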
52 |
|
58 | |||
53 |
|
59 | |||
54 | def fullrepack(repo, options=None): |
|
60 | def fullrepack(repo, options=None): | |
55 | """If ``packsonly`` is True, stores that create only loose objects are skipped. |
|
61 | """If ``packsonly`` is True, stores that create only loose objects are skipped. | |
56 | """ |
|
62 | """ | |
57 | if util.safehasattr(repo, 'shareddatastores'): |
|
63 | if util.safehasattr(repo, 'shareddatastores'): | |
58 | datasource = contentstore.unioncontentstore(*repo.shareddatastores) |
|
64 | datasource = contentstore.unioncontentstore(*repo.shareddatastores) | |
59 | historysource = metadatastore.unionmetadatastore( |
|
65 | historysource = metadatastore.unionmetadatastore( | |
60 | *repo.sharedhistorystores, allowincomplete=True |
|
66 | *repo.sharedhistorystores, allowincomplete=True | |
61 | ) |
|
67 | ) | |
62 |
|
68 | |||
63 | packpath = shallowutil.getcachepackpath( |
|
69 | packpath = shallowutil.getcachepackpath( | |
64 | repo, constants.FILEPACK_CATEGORY |
|
70 | repo, constants.FILEPACK_CATEGORY | |
65 | ) |
|
71 | ) | |
66 | _runrepack( |
|
72 | _runrepack( | |
67 | repo, |
|
73 | repo, | |
68 | datasource, |
|
74 | datasource, | |
69 | historysource, |
|
75 | historysource, | |
70 | packpath, |
|
76 | packpath, | |
71 | constants.FILEPACK_CATEGORY, |
|
77 | constants.FILEPACK_CATEGORY, | |
72 | options=options, |
|
78 | options=options, | |
73 | ) |
|
79 | ) | |
74 |
|
80 | |||
75 | if util.safehasattr(repo.manifestlog, 'datastore'): |
|
81 | if util.safehasattr(repo.manifestlog, 'datastore'): | |
76 | localdata, shareddata = _getmanifeststores(repo) |
|
82 | localdata, shareddata = _getmanifeststores(repo) | |
77 | lpackpath, ldstores, lhstores = localdata |
|
83 | lpackpath, ldstores, lhstores = localdata | |
78 | spackpath, sdstores, shstores = shareddata |
|
84 | spackpath, sdstores, shstores = shareddata | |
79 |
|
85 | |||
80 | # Repack the shared manifest store |
|
86 | # Repack the shared manifest store | |
81 | datasource = contentstore.unioncontentstore(*sdstores) |
|
87 | datasource = contentstore.unioncontentstore(*sdstores) | |
82 | historysource = metadatastore.unionmetadatastore( |
|
88 | historysource = metadatastore.unionmetadatastore( | |
83 | *shstores, allowincomplete=True |
|
89 | *shstores, allowincomplete=True | |
84 | ) |
|
90 | ) | |
85 | _runrepack( |
|
91 | _runrepack( | |
86 | repo, |
|
92 | repo, | |
87 | datasource, |
|
93 | datasource, | |
88 | historysource, |
|
94 | historysource, | |
89 | spackpath, |
|
95 | spackpath, | |
90 | constants.TREEPACK_CATEGORY, |
|
96 | constants.TREEPACK_CATEGORY, | |
91 | options=options, |
|
97 | options=options, | |
92 | ) |
|
98 | ) | |
93 |
|
99 | |||
94 | # Repack the local manifest store |
|
100 | # Repack the local manifest store | |
95 | datasource = contentstore.unioncontentstore( |
|
101 | datasource = contentstore.unioncontentstore( | |
96 | *ldstores, allowincomplete=True |
|
102 | *ldstores, allowincomplete=True | |
97 | ) |
|
103 | ) | |
98 | historysource = metadatastore.unionmetadatastore( |
|
104 | historysource = metadatastore.unionmetadatastore( | |
99 | *lhstores, allowincomplete=True |
|
105 | *lhstores, allowincomplete=True | |
100 | ) |
|
106 | ) | |
101 | _runrepack( |
|
107 | _runrepack( | |
102 | repo, |
|
108 | repo, | |
103 | datasource, |
|
109 | datasource, | |
104 | historysource, |
|
110 | historysource, | |
105 | lpackpath, |
|
111 | lpackpath, | |
106 | constants.TREEPACK_CATEGORY, |
|
112 | constants.TREEPACK_CATEGORY, | |
107 | options=options, |
|
113 | options=options, | |
108 | ) |
|
114 | ) | |
109 |
|
115 | |||
110 |
|
116 | |||
111 | def incrementalrepack(repo, options=None): |
|
117 | def incrementalrepack(repo, options=None): | |
112 | """This repacks the repo by looking at the distribution of pack files in the |
|
118 | """This repacks the repo by looking at the distribution of pack files in the | |
113 | repo and performing the most minimal repack to keep the repo in good shape. |
|
119 | repo and performing the most minimal repack to keep the repo in good shape. | |
114 | """ |
|
120 | """ | |
115 | if util.safehasattr(repo, 'shareddatastores'): |
|
121 | if util.safehasattr(repo, 'shareddatastores'): | |
116 | packpath = shallowutil.getcachepackpath( |
|
122 | packpath = shallowutil.getcachepackpath( | |
117 | repo, constants.FILEPACK_CATEGORY |
|
123 | repo, constants.FILEPACK_CATEGORY | |
118 | ) |
|
124 | ) | |
119 | _incrementalrepack( |
|
125 | _incrementalrepack( | |
120 | repo, |
|
126 | repo, | |
121 | repo.shareddatastores, |
|
127 | repo.shareddatastores, | |
122 | repo.sharedhistorystores, |
|
128 | repo.sharedhistorystores, | |
123 | packpath, |
|
129 | packpath, | |
124 | constants.FILEPACK_CATEGORY, |
|
130 | constants.FILEPACK_CATEGORY, | |
125 | options=options, |
|
131 | options=options, | |
126 | ) |
|
132 | ) | |
127 |
|
133 | |||
128 | if util.safehasattr(repo.manifestlog, 'datastore'): |
|
134 | if util.safehasattr(repo.manifestlog, 'datastore'): | |
129 | localdata, shareddata = _getmanifeststores(repo) |
|
135 | localdata, shareddata = _getmanifeststores(repo) | |
130 | lpackpath, ldstores, lhstores = localdata |
|
136 | lpackpath, ldstores, lhstores = localdata | |
131 | spackpath, sdstores, shstores = shareddata |
|
137 | spackpath, sdstores, shstores = shareddata | |
132 |
|
138 | |||
133 | # Repack the shared manifest store |
|
139 | # Repack the shared manifest store | |
134 | _incrementalrepack( |
|
140 | _incrementalrepack( | |
135 | repo, |
|
141 | repo, | |
136 | sdstores, |
|
142 | sdstores, | |
137 | shstores, |
|
143 | shstores, | |
138 | spackpath, |
|
144 | spackpath, | |
139 | constants.TREEPACK_CATEGORY, |
|
145 | constants.TREEPACK_CATEGORY, | |
140 | options=options, |
|
146 | options=options, | |
141 | ) |
|
147 | ) | |
142 |
|
148 | |||
143 | # Repack the local manifest store |
|
149 | # Repack the local manifest store | |
144 | _incrementalrepack( |
|
150 | _incrementalrepack( | |
145 | repo, |
|
151 | repo, | |
146 | ldstores, |
|
152 | ldstores, | |
147 | lhstores, |
|
153 | lhstores, | |
148 | lpackpath, |
|
154 | lpackpath, | |
149 | constants.TREEPACK_CATEGORY, |
|
155 | constants.TREEPACK_CATEGORY, | |
150 | allowincompletedata=True, |
|
156 | allowincompletedata=True, | |
151 | options=options, |
|
157 | options=options, | |
152 | ) |
|
158 | ) | |
153 |
|
159 | |||
154 |
|
160 | |||
155 | def _getmanifeststores(repo): |
|
161 | def _getmanifeststores(repo): | |
156 | shareddatastores = repo.manifestlog.shareddatastores |
|
162 | shareddatastores = repo.manifestlog.shareddatastores | |
157 | localdatastores = repo.manifestlog.localdatastores |
|
163 | localdatastores = repo.manifestlog.localdatastores | |
158 | sharedhistorystores = repo.manifestlog.sharedhistorystores |
|
164 | sharedhistorystores = repo.manifestlog.sharedhistorystores | |
159 | localhistorystores = repo.manifestlog.localhistorystores |
|
165 | localhistorystores = repo.manifestlog.localhistorystores | |
160 |
|
166 | |||
161 | sharedpackpath = shallowutil.getcachepackpath( |
|
167 | sharedpackpath = shallowutil.getcachepackpath( | |
162 | repo, constants.TREEPACK_CATEGORY |
|
168 | repo, constants.TREEPACK_CATEGORY | |
163 | ) |
|
169 | ) | |
164 | localpackpath = shallowutil.getlocalpackpath( |
|
170 | localpackpath = shallowutil.getlocalpackpath( | |
165 | repo.svfs.vfs.base, constants.TREEPACK_CATEGORY |
|
171 | repo.svfs.vfs.base, constants.TREEPACK_CATEGORY | |
166 | ) |
|
172 | ) | |
167 |
|
173 | |||
168 | return ( |
|
174 | return ( | |
169 | (localpackpath, localdatastores, localhistorystores), |
|
175 | (localpackpath, localdatastores, localhistorystores), | |
170 | (sharedpackpath, shareddatastores, sharedhistorystores), |
|
176 | (sharedpackpath, shareddatastores, sharedhistorystores), | |
171 | ) |
|
177 | ) | |
172 |
|
178 | |||
173 |
|
179 | |||
174 | def _topacks(packpath, files, constructor): |
|
180 | def _topacks(packpath, files, constructor): | |
175 | paths = list(os.path.join(packpath, p) for p in files) |
|
181 | paths = list(os.path.join(packpath, p) for p in files) | |
176 | packs = list(constructor(p) for p in paths) |
|
182 | packs = list(constructor(p) for p in paths) | |
177 | return packs |
|
183 | return packs | |
178 |
|
184 | |||
179 |
|
185 | |||
180 | def _deletebigpacks(repo, folder, files): |
|
186 | def _deletebigpacks(repo, folder, files): | |
181 | """Deletes packfiles that are bigger than ``packs.maxpacksize``. |
|
187 | """Deletes packfiles that are bigger than ``packs.maxpacksize``. | |
182 |
|
188 | |||
183 | Returns ``files`` with the removed files omitted.""" |
|
189 | Returns ``files`` with the removed files omitted.""" | |
184 | maxsize = repo.ui.configbytes(b"packs", b"maxpacksize") |
|
190 | maxsize = repo.ui.configbytes(b"packs", b"maxpacksize") | |
185 | if maxsize <= 0: |
|
191 | if maxsize <= 0: | |
186 | return files |
|
192 | return files | |
187 |
|
193 | |||
188 | # This only considers datapacks today, but we could broaden it to include |
|
194 | # This only considers datapacks today, but we could broaden it to include | |
189 | # historypacks. |
|
195 | # historypacks. | |
190 | VALIDEXTS = [b".datapack", b".dataidx"] |
|
196 | VALIDEXTS = [b".datapack", b".dataidx"] | |
191 |
|
197 | |||
192 | # Either an oversize index or datapack will trigger cleanup of the whole |
|
198 | # Either an oversize index or datapack will trigger cleanup of the whole | |
193 | # pack: |
|
199 | # pack: | |
194 | oversized = { |
|
200 | oversized = { | |
195 | os.path.splitext(path)[0] |
|
201 | os.path.splitext(path)[0] | |
196 | for path, ftype, stat in files |
|
202 | for path, ftype, stat in files | |
197 | if (stat.st_size > maxsize and (os.path.splitext(path)[1] in VALIDEXTS)) |
|
203 | if (stat.st_size > maxsize and (os.path.splitext(path)[1] in VALIDEXTS)) | |
198 | } |
|
204 | } | |
199 |
|
205 | |||
200 | for rootfname in oversized: |
|
206 | for rootfname in oversized: | |
201 | rootpath = os.path.join(folder, rootfname) |
|
207 | rootpath = os.path.join(folder, rootfname) | |
202 | for ext in VALIDEXTS: |
|
208 | for ext in VALIDEXTS: | |
203 | path = rootpath + ext |
|
209 | path = rootpath + ext | |
204 | repo.ui.debug( |
|
210 | repo.ui.debug( | |
205 | b'removing oversize packfile %s (%s)\n' |
|
211 | b'removing oversize packfile %s (%s)\n' | |
206 | % (path, util.bytecount(os.stat(path).st_size)) |
|
212 | % (path, util.bytecount(os.stat(path).st_size)) | |
207 | ) |
|
213 | ) | |
208 | os.unlink(path) |
|
214 | os.unlink(path) | |
209 | return [row for row in files if os.path.basename(row[0]) not in oversized] |
|
215 | return [row for row in files if os.path.basename(row[0]) not in oversized] | |
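# Illustrative sketch, not part of the diff above: _deletebigpacks() is driven
# by the packs.maxpacksize byte limit read above; a value of 0 (the
# 'maxsize <= 0' path) disables the cleanup.  Example configuration, with an
# illustrative value:
#
#     [packs]
#     maxpacksize = 4GB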
210 |
|
216 | |||
211 |
|
217 | |||
212 | def _incrementalrepack( |
|
218 | def _incrementalrepack( | |
213 | repo, |
|
219 | repo, | |
214 | datastore, |
|
220 | datastore, | |
215 | historystore, |
|
221 | historystore, | |
216 | packpath, |
|
222 | packpath, | |
217 | category, |
|
223 | category, | |
218 | allowincompletedata=False, |
|
224 | allowincompletedata=False, | |
219 | options=None, |
|
225 | options=None, | |
220 | ): |
|
226 | ): | |
221 | shallowutil.mkstickygroupdir(repo.ui, packpath) |
|
227 | shallowutil.mkstickygroupdir(repo.ui, packpath) | |
222 |
|
228 | |||
223 | files = osutil.listdir(packpath, stat=True) |
|
229 | files = osutil.listdir(packpath, stat=True) | |
224 | files = _deletebigpacks(repo, packpath, files) |
|
230 | files = _deletebigpacks(repo, packpath, files) | |
225 | datapacks = _topacks( |
|
231 | datapacks = _topacks( | |
226 | packpath, _computeincrementaldatapack(repo.ui, files), datapack.datapack |
|
232 | packpath, _computeincrementaldatapack(repo.ui, files), datapack.datapack | |
227 | ) |
|
233 | ) | |
228 | datapacks.extend( |
|
234 | datapacks.extend( | |
229 | s for s in datastore if not isinstance(s, datapack.datapackstore) |
|
235 | s for s in datastore if not isinstance(s, datapack.datapackstore) | |
230 | ) |
|
236 | ) | |
231 |
|
237 | |||
232 | historypacks = _topacks( |
|
238 | historypacks = _topacks( | |
233 | packpath, |
|
239 | packpath, | |
234 | _computeincrementalhistorypack(repo.ui, files), |
|
240 | _computeincrementalhistorypack(repo.ui, files), | |
235 | historypack.historypack, |
|
241 | historypack.historypack, | |
236 | ) |
|
242 | ) | |
237 | historypacks.extend( |
|
243 | historypacks.extend( | |
238 | s |
|
244 | s | |
239 | for s in historystore |
|
245 | for s in historystore | |
240 | if not isinstance(s, historypack.historypackstore) |
|
246 | if not isinstance(s, historypack.historypackstore) | |
241 | ) |
|
247 | ) | |
242 |
|
248 | |||
243 | # ``allhistory{files,packs}`` contains all known history packs, even ones we |
|
249 | # ``allhistory{files,packs}`` contains all known history packs, even ones we | |
244 | # don't plan to repack. They are used during the datapack repack to ensure |
|
250 | # don't plan to repack. They are used during the datapack repack to ensure | |
245 | # good ordering of nodes. |
|
251 | # good ordering of nodes. | |
246 | allhistoryfiles = _allpackfileswithsuffix( |
|
252 | allhistoryfiles = _allpackfileswithsuffix( | |
247 | files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX |
|
253 | files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX | |
248 | ) |
|
254 | ) | |
249 | allhistorypacks = _topacks( |
|
255 | allhistorypacks = _topacks( | |
250 | packpath, |
|
256 | packpath, | |
251 | (f for f, mode, stat in allhistoryfiles), |
|
257 | (f for f, mode, stat in allhistoryfiles), | |
252 | historypack.historypack, |
|
258 | historypack.historypack, | |
253 | ) |
|
259 | ) | |
254 | allhistorypacks.extend( |
|
260 | allhistorypacks.extend( | |
255 | s |
|
261 | s | |
256 | for s in historystore |
|
262 | for s in historystore | |
257 | if not isinstance(s, historypack.historypackstore) |
|
263 | if not isinstance(s, historypack.historypackstore) | |
258 | ) |
|
264 | ) | |
259 | _runrepack( |
|
265 | _runrepack( | |
260 | repo, |
|
266 | repo, | |
261 | contentstore.unioncontentstore( |
|
267 | contentstore.unioncontentstore( | |
262 | *datapacks, allowincomplete=allowincompletedata |
|
268 | *datapacks, allowincomplete=allowincompletedata | |
263 | ), |
|
269 | ), | |
264 | metadatastore.unionmetadatastore(*historypacks, allowincomplete=True), |
|
270 | metadatastore.unionmetadatastore(*historypacks, allowincomplete=True), | |
265 | packpath, |
|
271 | packpath, | |
266 | category, |
|
272 | category, | |
267 | fullhistory=metadatastore.unionmetadatastore( |
|
273 | fullhistory=metadatastore.unionmetadatastore( | |
268 | *allhistorypacks, allowincomplete=True |
|
274 | *allhistorypacks, allowincomplete=True | |
269 | ), |
|
275 | ), | |
270 | options=options, |
|
276 | options=options, | |
271 | ) |
|
277 | ) | |
272 |
|
278 | |||
273 |
|
279 | |||
274 | def _computeincrementaldatapack(ui, files): |
|
280 | def _computeincrementaldatapack(ui, files): | |
275 | opts = { |
|
281 | opts = { | |
276 | b'gencountlimit': ui.configint(b'remotefilelog', b'data.gencountlimit'), |
|
282 | b'gencountlimit': ui.configint(b'remotefilelog', b'data.gencountlimit'), | |
277 | b'generations': ui.configlist(b'remotefilelog', b'data.generations'), |
|
283 | b'generations': ui.configlist(b'remotefilelog', b'data.generations'), | |
278 | b'maxrepackpacks': ui.configint( |
|
284 | b'maxrepackpacks': ui.configint( | |
279 | b'remotefilelog', b'data.maxrepackpacks' |
|
285 | b'remotefilelog', b'data.maxrepackpacks' | |
280 | ), |
|
286 | ), | |
281 | b'repackmaxpacksize': ui.configbytes( |
|
287 | b'repackmaxpacksize': ui.configbytes( | |
282 | b'remotefilelog', b'data.repackmaxpacksize' |
|
288 | b'remotefilelog', b'data.repackmaxpacksize' | |
283 | ), |
|
289 | ), | |
284 | b'repacksizelimit': ui.configbytes( |
|
290 | b'repacksizelimit': ui.configbytes( | |
285 | b'remotefilelog', b'data.repacksizelimit' |
|
291 | b'remotefilelog', b'data.repacksizelimit' | |
286 | ), |
|
292 | ), | |
287 | } |
|
293 | } | |
288 |
|
294 | |||
289 | packfiles = _allpackfileswithsuffix( |
|
295 | packfiles = _allpackfileswithsuffix( | |
290 | files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX |
|
296 | files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX | |
291 | ) |
|
297 | ) | |
292 | return _computeincrementalpack(packfiles, opts) |
|
298 | return _computeincrementalpack(packfiles, opts) | |
293 |
|
299 | |||
294 |
|
300 | |||
295 | def _computeincrementalhistorypack(ui, files): |
|
301 | def _computeincrementalhistorypack(ui, files): | |
296 | opts = { |
|
302 | opts = { | |
297 | b'gencountlimit': ui.configint( |
|
303 | b'gencountlimit': ui.configint( | |
298 | b'remotefilelog', b'history.gencountlimit' |
|
304 | b'remotefilelog', b'history.gencountlimit' | |
299 | ), |
|
305 | ), | |
300 | b'generations': ui.configlist( |
|
306 | b'generations': ui.configlist( | |
301 | b'remotefilelog', b'history.generations', [b'100MB'] |
|
307 | b'remotefilelog', b'history.generations', [b'100MB'] | |
302 | ), |
|
308 | ), | |
303 | b'maxrepackpacks': ui.configint( |
|
309 | b'maxrepackpacks': ui.configint( | |
304 | b'remotefilelog', b'history.maxrepackpacks' |
|
310 | b'remotefilelog', b'history.maxrepackpacks' | |
305 | ), |
|
311 | ), | |
306 | b'repackmaxpacksize': ui.configbytes( |
|
312 | b'repackmaxpacksize': ui.configbytes( | |
307 | b'remotefilelog', b'history.repackmaxpacksize', b'400MB' |
|
313 | b'remotefilelog', b'history.repackmaxpacksize', b'400MB' | |
308 | ), |
|
314 | ), | |
309 | b'repacksizelimit': ui.configbytes( |
|
315 | b'repacksizelimit': ui.configbytes( | |
310 | b'remotefilelog', b'history.repacksizelimit' |
|
316 | b'remotefilelog', b'history.repacksizelimit' | |
311 | ), |
|
317 | ), | |
312 | } |
|
318 | } | |
313 |
|
319 | |||
314 | packfiles = _allpackfileswithsuffix( |
|
320 | packfiles = _allpackfileswithsuffix( | |
315 | files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX |
|
321 | files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX | |
316 | ) |
|
322 | ) | |
317 | return _computeincrementalpack(packfiles, opts) |
|
323 | return _computeincrementalpack(packfiles, opts) | |
318 |
|
324 | |||
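# A minimal hgrc sketch for the repack tuning knobs read by the two helpers
# above; the values are illustrative assumptions, not recommended settings:
#
#   [remotefilelog]
#   data.generations = 1GB, 100MB, 1MB
#   data.repacksizelimit = 100MB
#   history.generations = 100MB
#   history.repackmaxpacksize = 400MB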
319 |
|
325 | |||
320 | def _allpackfileswithsuffix(files, packsuffix, indexsuffix): |
|
326 | def _allpackfileswithsuffix(files, packsuffix, indexsuffix): | |
321 | result = [] |
|
327 | result = [] | |
322 | fileset = set(fn for fn, mode, stat in files) |
|
328 | fileset = set(fn for fn, mode, stat in files) | |
323 | for filename, mode, stat in files: |
|
329 | for filename, mode, stat in files: | |
324 | if not filename.endswith(packsuffix): |
|
330 | if not filename.endswith(packsuffix): | |
325 | continue |
|
331 | continue | |
326 |
|
332 | |||
327 | prefix = filename[: -len(packsuffix)] |
|
333 | prefix = filename[: -len(packsuffix)] | |
328 |
|
334 | |||
329 | # Don't process a pack if it doesn't have an index. |
|
335 | # Don't process a pack if it doesn't have an index. | |
330 | if (prefix + indexsuffix) not in fileset: |
|
336 | if (prefix + indexsuffix) not in fileset: | |
331 | continue |
|
337 | continue | |
332 | result.append((prefix, mode, stat)) |
|
338 | result.append((prefix, mode, stat)) | |
333 |
|
339 | |||
334 | return result |
|
340 | return result | |
335 |
|
341 | |||
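# A concrete illustration of the pairing rule above, assuming the datapack
# suffixes ``.datapack``/``.dataidx`` used elsewhere in this file: a listing
# with ``abc123.datapack`` and ``abc123.dataidx`` yields the prefix
# ``abc123``, while a ``.datapack`` with no matching index file is skipped.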
336 |
|
342 | |||
337 | def _computeincrementalpack(files, opts): |
|
343 | def _computeincrementalpack(files, opts): | |
338 | """Given a set of pack files along with the configuration options, this |
|
344 | """Given a set of pack files along with the configuration options, this | |
339 | function computes the list of files that should be packed as part of an |
|
345 | function computes the list of files that should be packed as part of an | |
340 | incremental repack. |
|
346 | incremental repack. | |
341 |
|
347 | |||
342 | It tries to strike a balance between keeping incremental repacks cheap (i.e. |
|
348 | It tries to strike a balance between keeping incremental repacks cheap (i.e. | |
343 | packing small things when possible, and rolling the packs up to the big ones |
|
349 | packing small things when possible, and rolling the packs up to the big ones | |
344 | over time). |
|
350 | over time). | |
345 | """ |
|
351 | """ | |
346 |
|
352 | |||
347 | limits = list( |
|
353 | limits = list( | |
348 | sorted((util.sizetoint(s) for s in opts[b'generations']), reverse=True) |
|
354 | sorted((util.sizetoint(s) for s in opts[b'generations']), reverse=True) | |
349 | ) |
|
355 | ) | |
350 | limits.append(0) |
|
356 | limits.append(0) | |
351 |
|
357 | |||
352 | # Group the packs by generation (i.e. by size) |
|
358 | # Group the packs by generation (i.e. by size) | |
353 | generations = [] |
|
359 | generations = [] | |
354 | for i in pycompat.xrange(len(limits)): |
|
360 | for i in pycompat.xrange(len(limits)): | |
355 | generations.append([]) |
|
361 | generations.append([]) | |
356 |
|
362 | |||
357 | sizes = {} |
|
363 | sizes = {} | |
358 | for prefix, mode, stat in files: |
|
364 | for prefix, mode, stat in files: | |
359 | size = stat.st_size |
|
365 | size = stat.st_size | |
360 | if size > opts[b'repackmaxpacksize']: |
|
366 | if size > opts[b'repackmaxpacksize']: | |
361 | continue |
|
367 | continue | |
362 |
|
368 | |||
363 | sizes[prefix] = size |
|
369 | sizes[prefix] = size | |
364 | for i, limit in enumerate(limits): |
|
370 | for i, limit in enumerate(limits): | |
365 | if size > limit: |
|
371 | if size > limit: | |
366 | generations[i].append(prefix) |
|
372 | generations[i].append(prefix) | |
367 | break |
|
373 | break | |
368 |
|
374 | |||
369 | # Steps for picking what packs to repack: |
|
375 | # Steps for picking what packs to repack: | |
370 | # 1. Pick the largest generation with > gencountlimit pack files. |
|
376 | # 1. Pick the largest generation with > gencountlimit pack files. | |
371 | # 2. Take the smallest three packs. |
|
377 | # 2. Take the smallest three packs. | |
372 | # 3. While total-size-of-packs < repacksizelimit: add another pack |
|
378 | # 3. While total-size-of-packs < repacksizelimit: add another pack | |
373 |
|
379 | |||
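# A worked example with invented numbers: with limits of [1GB, 100MB, 1MB, 0],
# a gencountlimit of 2, and packs of 300MB, 200MB, 5MB, 4MB, 3MB and 2MB, the
# >100MB generation holds only two packs, but the >1MB generation holds four,
# so it is selected; the three smallest packs (2MB, 3MB, 4MB) are taken first
# and the 5MB pack is appended as long as the running total stays below
# repacksizelimit and fewer than maxrepackpacks packs have been chosen.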
374 | # Find the largest generation with more than gencountlimit packs |
|
380 | # Find the largest generation with more than gencountlimit packs | |
375 | genpacks = [] |
|
381 | genpacks = [] | |
376 | for i, limit in enumerate(limits): |
|
382 | for i, limit in enumerate(limits): | |
377 | if len(generations[i]) > opts[b'gencountlimit']: |
|
383 | if len(generations[i]) > opts[b'gencountlimit']: | |
378 | # Sort to be smallest last, for easy popping later |
|
384 | # Sort to be smallest last, for easy popping later | |
379 | genpacks.extend( |
|
385 | genpacks.extend( | |
380 | sorted(generations[i], reverse=True, key=lambda x: sizes[x]) |
|
386 | sorted(generations[i], reverse=True, key=lambda x: sizes[x]) | |
381 | ) |
|
387 | ) | |
382 | break |
|
388 | break | |
383 |
|
389 | |||
384 | # Take as many packs from the generation as we can |
|
390 | # Take as many packs from the generation as we can | |
385 | chosenpacks = genpacks[-3:] |
|
391 | chosenpacks = genpacks[-3:] | |
386 | genpacks = genpacks[:-3] |
|
392 | genpacks = genpacks[:-3] | |
387 | repacksize = sum(sizes[n] for n in chosenpacks) |
|
393 | repacksize = sum(sizes[n] for n in chosenpacks) | |
388 | while ( |
|
394 | while ( | |
389 | repacksize < opts[b'repacksizelimit'] |
|
395 | repacksize < opts[b'repacksizelimit'] | |
390 | and genpacks |
|
396 | and genpacks | |
391 | and len(chosenpacks) < opts[b'maxrepackpacks'] |
|
397 | and len(chosenpacks) < opts[b'maxrepackpacks'] | |
392 | ): |
|
398 | ): | |
393 | chosenpacks.append(genpacks.pop()) |
|
399 | chosenpacks.append(genpacks.pop()) | |
394 | repacksize += sizes[chosenpacks[-1]] |
|
400 | repacksize += sizes[chosenpacks[-1]] | |
395 |
|
401 | |||
396 | return chosenpacks |
|
402 | return chosenpacks | |
397 |
|
403 | |||
398 |
|
404 | |||
399 | def _runrepack( |
|
405 | def _runrepack( | |
400 | repo, data, history, packpath, category, fullhistory=None, options=None |
|
406 | repo, data, history, packpath, category, fullhistory=None, options=None | |
401 | ): |
|
407 | ): | |
402 | shallowutil.mkstickygroupdir(repo.ui, packpath) |
|
408 | shallowutil.mkstickygroupdir(repo.ui, packpath) | |
403 |
|
409 | |||
404 | def isold(repo, filename, node): |
|
410 | def isold(repo, filename, node): | |
405 | """Check if the file node is older than a limit. |
|
411 | """Check if the file node is older than a limit. | |
406 | Unless a limit is specified in the config, the default limit is used. |
|
412 | Unless a limit is specified in the config, the default limit is used. | |
407 | """ |
|
413 | """ | |
408 | filectx = repo.filectx(filename, fileid=node) |
|
414 | filectx = repo.filectx(filename, fileid=node) | |
409 | filetime = repo[filectx.linkrev()].date() |
|
415 | filetime = repo[filectx.linkrev()].date() | |
410 |
|
416 | |||
411 | ttl = repo.ui.configint(b'remotefilelog', b'nodettl') |
|
417 | ttl = repo.ui.configint(b'remotefilelog', b'nodettl') | |
412 |
|
418 | |||
413 | limit = time.time() - ttl |
|
419 | limit = time.time() - ttl | |
414 | return filetime[0] < limit |
|
420 | return filetime[0] < limit | |
415 |
|
421 | |||
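# An illustrative reading of ``isold`` above, with an assumed TTL: if
# ``remotefilelog.nodettl`` were set to 86400 (one day), a file node whose
# linking commit is more than a day old counts as old, and the gc repack
# below will drop it unless it is in the keepset.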
416 | garbagecollect = repo.ui.configbool(b'remotefilelog', b'gcrepack') |
|
422 | garbagecollect = repo.ui.configbool(b'remotefilelog', b'gcrepack') | |
417 | if not fullhistory: |
|
423 | if not fullhistory: | |
418 | fullhistory = history |
|
424 | fullhistory = history | |
419 | packer = repacker( |
|
425 | packer = repacker( | |
420 | repo, |
|
426 | repo, | |
421 | data, |
|
427 | data, | |
422 | history, |
|
428 | history, | |
423 | fullhistory, |
|
429 | fullhistory, | |
424 | category, |
|
430 | category, | |
425 | gc=garbagecollect, |
|
431 | gc=garbagecollect, | |
426 | isold=isold, |
|
432 | isold=isold, | |
427 | options=options, |
|
433 | options=options, | |
428 | ) |
|
434 | ) | |
429 |
|
435 | |||
430 | with datapack.mutabledatapack(repo.ui, packpath) as dpack: |
|
436 | with datapack.mutabledatapack(repo.ui, packpath) as dpack: | |
431 | with historypack.mutablehistorypack(repo.ui, packpath) as hpack: |
|
437 | with historypack.mutablehistorypack(repo.ui, packpath) as hpack: | |
432 | try: |
|
438 | try: | |
433 | packer.run(dpack, hpack) |
|
439 | packer.run(dpack, hpack) | |
434 | except error.LockHeld: |
|
440 | except error.LockHeld: | |
435 | raise RepackAlreadyRunning( |
|
441 | raise RepackAlreadyRunning( | |
436 | _( |
|
442 | _( | |
437 | b"skipping repack - another repack " |
|
443 | b"skipping repack - another repack " | |
438 | b"is already running" |
|
444 | b"is already running" | |
439 | ) |
|
445 | ) | |
440 | ) |
|
446 | ) | |
441 |
|
447 | |||
442 |
|
448 | |||
443 | def keepset(repo, keyfn, lastkeepkeys=None): |
|
449 | def keepset(repo, keyfn, lastkeepkeys=None): | |
444 | """Computes a keepset which is not garbage collected. |
|
450 | """Computes a keepset which is not garbage collected. | |
445 | 'keyfn' is a function that maps filename, node to a unique key. |
|
451 | 'keyfn' is a function that maps filename, node to a unique key. | |
446 | 'lastkeepkeys' is an optional argument; if provided, the keepset |
|
452 | 'lastkeepkeys' is an optional argument; if provided, the keepset | |
447 | function updates lastkeepkeys with more keys and returns the result. |
|
453 | function updates lastkeepkeys with more keys and returns the result. | |
448 | """ |
|
454 | """ | |
449 | if not lastkeepkeys: |
|
455 | if not lastkeepkeys: | |
450 | keepkeys = set() |
|
456 | keepkeys = set() | |
451 | else: |
|
457 | else: | |
452 | keepkeys = lastkeepkeys |
|
458 | keepkeys = lastkeepkeys | |
453 |
|
459 | |||
454 | # We want to keep: |
|
460 | # We want to keep: | |
455 | # 1. Working copy parent |
|
461 | # 1. Working copy parent | |
456 | # 2. Draft commits |
|
462 | # 2. Draft commits | |
457 | # 3. Parents of draft commits |
|
463 | # 3. Parents of draft commits | |
458 | # 4. Pullprefetch and bgprefetchrevs revsets if specified |
|
464 | # 4. Pullprefetch and bgprefetchrevs revsets if specified | |
459 | revs = [b'.', b'draft()', b'parents(draft())'] |
|
465 | revs = [b'.', b'draft()', b'parents(draft())'] | |
460 | prefetchrevs = repo.ui.config(b'remotefilelog', b'pullprefetch', None) |
|
466 | prefetchrevs = repo.ui.config(b'remotefilelog', b'pullprefetch', None) | |
461 | if prefetchrevs: |
|
467 | if prefetchrevs: | |
462 | revs.append(b'(%s)' % prefetchrevs) |
|
468 | revs.append(b'(%s)' % prefetchrevs) | |
463 | prefetchrevs = repo.ui.config(b'remotefilelog', b'bgprefetchrevs', None) |
|
469 | prefetchrevs = repo.ui.config(b'remotefilelog', b'bgprefetchrevs', None) | |
464 | if prefetchrevs: |
|
470 | if prefetchrevs: | |
465 | revs.append(b'(%s)' % prefetchrevs) |
|
471 | revs.append(b'(%s)' % prefetchrevs) | |
466 | revs = b'+'.join(revs) |
|
472 | revs = b'+'.join(revs) | |
467 |
|
473 | |||
468 | revs = [b'sort((%s), "topo")' % revs] |
|
474 | revs = [b'sort((%s), "topo")' % revs] | |
469 | keep = scmutil.revrange(repo, revs) |
|
475 | keep = scmutil.revrange(repo, revs) | |
470 |
|
476 | |||
471 | processed = set() |
|
477 | processed = set() | |
472 | lastmanifest = None |
|
478 | lastmanifest = None | |
473 |
|
479 | |||
474 | # process the commits in toposorted order starting from the oldest |
|
480 | # process the commits in toposorted order starting from the oldest | |
475 | for r in reversed(keep._list): |
|
481 | for r in reversed(keep._list): | |
476 | if repo[r].p1().rev() in processed: |
|
482 | if repo[r].p1().rev() in processed: | |
477 | # if the direct parent has already been processed |
|
483 | # if the direct parent has already been processed | |
478 | # then we only need to process the delta |
|
484 | # then we only need to process the delta | |
479 | m = repo[r].manifestctx().readdelta() |
|
485 | m = repo[r].manifestctx().readdelta() | |
480 | else: |
|
486 | else: | |
481 | # otherwise take the manifest and diff it |
|
487 | # otherwise take the manifest and diff it | |
482 | # with the previous manifest if one exists |
|
488 | # with the previous manifest if one exists | |
483 | if lastmanifest: |
|
489 | if lastmanifest: | |
484 | m = repo[r].manifest().diff(lastmanifest) |
|
490 | m = repo[r].manifest().diff(lastmanifest) | |
485 | else: |
|
491 | else: | |
486 | m = repo[r].manifest() |
|
492 | m = repo[r].manifest() | |
487 | lastmanifest = repo[r].manifest() |
|
493 | lastmanifest = repo[r].manifest() | |
488 | processed.add(r) |
|
494 | processed.add(r) | |
489 |
|
495 | |||
490 | # populate keepkeys with keys from the current manifest |
|
496 | # populate keepkeys with keys from the current manifest | |
491 | if type(m) is dict: |
|
497 | if type(m) is dict: | |
492 | # m is a result of diff of two manifests and is a dictionary that |
|
498 | # m is a result of diff of two manifests and is a dictionary that | |
493 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple |
|
499 | # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple | |
494 | for filename, diff in pycompat.iteritems(m): |
|
500 | for filename, diff in pycompat.iteritems(m): | |
495 | if diff[0][0] is not None: |
|
501 | if diff[0][0] is not None: | |
496 | keepkeys.add(keyfn(filename, diff[0][0])) |
|
502 | keepkeys.add(keyfn(filename, diff[0][0])) | |
497 | else: |
|
503 | else: | |
498 | # m is a manifest object |
|
504 | # m is a manifest object | |
499 | for filename, filenode in pycompat.iteritems(m): |
|
505 | for filename, filenode in pycompat.iteritems(m): | |
500 | keepkeys.add(keyfn(filename, filenode)) |
|
506 | keepkeys.add(keyfn(filename, filenode)) | |
501 |
|
507 | |||
502 | return keepkeys |
|
508 | return keepkeys | |
503 |
|
509 | |||
504 |
|
510 | |||
505 | class repacker(object): |
|
511 | class repacker(object): | |
506 | """Class for orchestrating the repack of data and history information into a |
|
512 | """Class for orchestrating the repack of data and history information into a | |
507 | new format. |
|
513 | new format. | |
508 | """ |
|
514 | """ | |
509 |
|
515 | |||
510 | def __init__( |
|
516 | def __init__( | |
511 | self, |
|
517 | self, | |
512 | repo, |
|
518 | repo, | |
513 | data, |
|
519 | data, | |
514 | history, |
|
520 | history, | |
515 | fullhistory, |
|
521 | fullhistory, | |
516 | category, |
|
522 | category, | |
517 | gc=False, |
|
523 | gc=False, | |
518 | isold=None, |
|
524 | isold=None, | |
519 | options=None, |
|
525 | options=None, | |
520 | ): |
|
526 | ): | |
521 | self.repo = repo |
|
527 | self.repo = repo | |
522 | self.data = data |
|
528 | self.data = data | |
523 | self.history = history |
|
529 | self.history = history | |
524 | self.fullhistory = fullhistory |
|
530 | self.fullhistory = fullhistory | |
525 | self.unit = constants.getunits(category) |
|
531 | self.unit = constants.getunits(category) | |
526 | self.garbagecollect = gc |
|
532 | self.garbagecollect = gc | |
527 | self.options = options |
|
533 | self.options = options | |
528 | if self.garbagecollect: |
|
534 | if self.garbagecollect: | |
529 | if not isold: |
|
535 | if not isold: | |
530 | raise ValueError(b"Function 'isold' is not properly specified") |
|
536 | raise ValueError(b"Function 'isold' is not properly specified") | |
531 | # use (filename, node) tuple as a keepset key |
|
537 | # use (filename, node) tuple as a keepset key | |
532 | self.keepkeys = keepset(repo, lambda f, n: (f, n)) |
|
538 | self.keepkeys = keepset(repo, lambda f, n: (f, n)) | |
533 | self.isold = isold |
|
539 | self.isold = isold | |
534 |
|
540 | |||
535 | def run(self, targetdata, targethistory): |
|
541 | def run(self, targetdata, targethistory): | |
536 | ledger = repackledger() |
|
542 | ledger = repackledger() | |
537 |
|
543 | |||
538 | with lockmod.lock( |
|
544 | with lockmod.lock( | |
539 | repacklockvfs(self.repo), b"repacklock", desc=None, timeout=0 |
|
545 | repacklockvfs(self.repo), b"repacklock", desc=None, timeout=0 | |
540 | ): |
|
546 | ): | |
541 | self.repo.hook(b'prerepack') |
|
547 | self.repo.hook(b'prerepack') | |
542 |
|
548 | |||
543 | # Populate ledger from source |
|
549 | # Populate ledger from source | |
544 | self.data.markledger(ledger, options=self.options) |
|
550 | self.data.markledger(ledger, options=self.options) | |
545 | self.history.markledger(ledger, options=self.options) |
|
551 | self.history.markledger(ledger, options=self.options) | |
546 |
|
552 | |||
547 | # Run repack |
|
553 | # Run repack | |
548 | self.repackdata(ledger, targetdata) |
|
554 | self.repackdata(ledger, targetdata) | |
549 | self.repackhistory(ledger, targethistory) |
|
555 | self.repackhistory(ledger, targethistory) | |
550 |
|
556 | |||
551 | # Call cleanup on each source |
|
557 | # Call cleanup on each source | |
552 | for source in ledger.sources: |
|
558 | for source in ledger.sources: | |
553 | source.cleanup(ledger) |
|
559 | source.cleanup(ledger) | |
554 |
|
560 | |||
555 | def _chainorphans(self, ui, filename, nodes, orphans, deltabases): |
|
561 | def _chainorphans(self, ui, filename, nodes, orphans, deltabases): | |
556 | """Reorderes ``orphans`` into a single chain inside ``nodes`` and |
|
562 | """Reorderes ``orphans`` into a single chain inside ``nodes`` and | |
557 | ``deltabases``. |
|
563 | ``deltabases``. | |
558 |
|
564 | |||
559 | We often have orphan entries (nodes without a base that aren't |
|
565 | We often have orphan entries (nodes without a base that aren't | |
560 | referenced by other nodes -- i.e., part of a chain) due to gaps in |
|
566 | referenced by other nodes -- i.e., part of a chain) due to gaps in | |
561 | history. Rather than store them as individual fulltexts, we prefer to |
|
567 | history. Rather than store them as individual fulltexts, we prefer to | |
562 | insert them as one chain sorted by size. |
|
568 | insert them as one chain sorted by size. | |
563 | """ |
|
569 | """ | |
564 | if not orphans: |
|
570 | if not orphans: | |
565 | return nodes |
|
571 | return nodes | |
566 |
|
572 | |||
567 | def getsize(node, default=0): |
|
573 | def getsize(node, default=0): | |
568 | meta = self.data.getmeta(filename, node) |
|
574 | meta = self.data.getmeta(filename, node) | |
569 | if constants.METAKEYSIZE in meta: |
|
575 | if constants.METAKEYSIZE in meta: | |
570 | return meta[constants.METAKEYSIZE] |
|
576 | return meta[constants.METAKEYSIZE] | |
571 | else: |
|
577 | else: | |
572 | return default |
|
578 | return default | |
573 |
|
579 | |||
574 | # Sort orphans by size; biggest first is preferred, since it's more |
|
580 | # Sort orphans by size; biggest first is preferred, since it's more | |
575 | # likely to be the newest version assuming files grow over time. |
|
581 | # likely to be the newest version assuming files grow over time. | |
576 | # (Sort by node first to ensure the sort is stable.) |
|
582 | # (Sort by node first to ensure the sort is stable.) | |
577 | orphans = sorted(orphans) |
|
583 | orphans = sorted(orphans) | |
578 | orphans = list(sorted(orphans, key=getsize, reverse=True)) |
|
584 | orphans = list(sorted(orphans, key=getsize, reverse=True)) | |
579 | if ui.debugflag: |
|
585 | if ui.debugflag: | |
580 | ui.debug( |
|
586 | ui.debug( | |
581 | b"%s: orphan chain: %s\n" |
|
587 | b"%s: orphan chain: %s\n" | |
582 | % (filename, b", ".join([short(s) for s in orphans])) |
|
588 | % (filename, b", ".join([short(s) for s in orphans])) | |
583 | ) |
|
589 | ) | |
584 |
|
590 | |||
585 | # Create one contiguous chain and reassign deltabases. |
|
591 | # Create one contiguous chain and reassign deltabases. | |
586 | for i, node in enumerate(orphans): |
|
592 | for i, node in enumerate(orphans): | |
587 | if i == 0: |
|
593 | if i == 0: | |
588 | deltabases[node] = (nullid, 0) |
|
594 | deltabases[node] = (nullid, 0) | |
589 | else: |
|
595 | else: | |
590 | parent = orphans[i - 1] |
|
596 | parent = orphans[i - 1] | |
591 | deltabases[node] = (parent, deltabases[parent][1] + 1) |
|
597 | deltabases[node] = (parent, deltabases[parent][1] + 1) | |
592 | nodes = [n for n in nodes if n not in orphans] |
|
598 | nodes = [n for n in nodes if n not in orphans] | |
593 | nodes += orphans |
|
599 | nodes += orphans | |
594 | return nodes |
|
600 | return nodes | |
595 |
|
601 | |||
596 | def repackdata(self, ledger, target): |
|
602 | def repackdata(self, ledger, target): | |
597 | ui = self.repo.ui |
|
603 | ui = self.repo.ui | |
598 | maxchainlen = ui.configint(b'packs', b'maxchainlen', 1000) |
|
604 | maxchainlen = ui.configint(b'packs', b'maxchainlen', 1000) | |
599 |
|
605 | |||
600 | byfile = {} |
|
606 | byfile = {} | |
601 | for entry in pycompat.itervalues(ledger.entries): |
|
607 | for entry in pycompat.itervalues(ledger.entries): | |
602 | if entry.datasource: |
|
608 | if entry.datasource: | |
603 | byfile.setdefault(entry.filename, {})[entry.node] = entry |
|
609 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |
604 |
|
610 | |||
605 | count = 0 |
|
611 | count = 0 | |
606 | repackprogress = ui.makeprogress( |
|
612 | repackprogress = ui.makeprogress( | |
607 | _(b"repacking data"), unit=self.unit, total=len(byfile) |
|
613 | _(b"repacking data"), unit=self.unit, total=len(byfile) | |
608 | ) |
|
614 | ) | |
609 | for filename, entries in sorted(pycompat.iteritems(byfile)): |
|
615 | for filename, entries in sorted(pycompat.iteritems(byfile)): | |
610 | repackprogress.update(count) |
|
616 | repackprogress.update(count) | |
611 |
|
617 | |||
612 | ancestors = {} |
|
618 | ancestors = {} | |
613 | nodes = list(node for node in entries) |
|
619 | nodes = list(node for node in entries) | |
614 | nohistory = [] |
|
620 | nohistory = [] | |
615 | buildprogress = ui.makeprogress( |
|
621 | buildprogress = ui.makeprogress( | |
616 | _(b"building history"), unit=b'nodes', total=len(nodes) |
|
622 | _(b"building history"), unit=b'nodes', total=len(nodes) | |
617 | ) |
|
623 | ) | |
618 | for i, node in enumerate(nodes): |
|
624 | for i, node in enumerate(nodes): | |
619 | if node in ancestors: |
|
625 | if node in ancestors: | |
620 | continue |
|
626 | continue | |
621 | buildprogress.update(i) |
|
627 | buildprogress.update(i) | |
622 | try: |
|
628 | try: | |
623 | ancestors.update( |
|
629 | ancestors.update( | |
624 | self.fullhistory.getancestors( |
|
630 | self.fullhistory.getancestors( | |
625 | filename, node, known=ancestors |
|
631 | filename, node, known=ancestors | |
626 | ) |
|
632 | ) | |
627 | ) |
|
633 | ) | |
628 | except KeyError: |
|
634 | except KeyError: | |
629 | # Since we're packing data entries, we may not have the |
|
635 | # Since we're packing data entries, we may not have the | |
630 | # corresponding history entries for them. It's not a big |
|
636 | # corresponding history entries for them. It's not a big | |
631 | # deal, but the entries won't be delta'd perfectly. |
|
637 | # deal, but the entries won't be delta'd perfectly. | |
632 | nohistory.append(node) |
|
638 | nohistory.append(node) | |
633 | buildprogress.complete() |
|
639 | buildprogress.complete() | |
634 |
|
640 | |||
635 | # Order the nodes children first, so we can produce reverse deltas |
|
641 | # Order the nodes children first, so we can produce reverse deltas | |
636 | orderednodes = list(reversed(self._toposort(ancestors))) |
|
642 | orderednodes = list(reversed(self._toposort(ancestors))) | |
637 | if len(nohistory) > 0: |
|
643 | if len(nohistory) > 0: | |
638 | ui.debug( |
|
644 | ui.debug( | |
639 | b'repackdata: %d nodes without history\n' % len(nohistory) |
|
645 | b'repackdata: %d nodes without history\n' % len(nohistory) | |
640 | ) |
|
646 | ) | |
641 | orderednodes.extend(sorted(nohistory)) |
|
647 | orderednodes.extend(sorted(nohistory)) | |
642 |
|
648 | |||
643 | # Filter orderednodes to just the nodes we want to serialize (it |
|
649 | # Filter orderednodes to just the nodes we want to serialize (it | |
644 | # currently also has the edge nodes' ancestors). |
|
650 | # currently also has the edge nodes' ancestors). | |
645 | orderednodes = list( |
|
651 | orderednodes = list( | |
646 | filter(lambda node: node in nodes, orderednodes) |
|
652 | filter(lambda node: node in nodes, orderednodes) | |
647 | ) |
|
653 | ) | |
648 |
|
654 | |||
649 | # Garbage collect old nodes: |
|
655 | # Garbage collect old nodes: | |
650 | if self.garbagecollect: |
|
656 | if self.garbagecollect: | |
651 | neworderednodes = [] |
|
657 | neworderednodes = [] | |
652 | for node in orderednodes: |
|
658 | for node in orderednodes: | |
653 | # If the node is old and is not in the keepset, we skip it, |
|
659 | # If the node is old and is not in the keepset, we skip it, | |
654 | # and mark as garbage collected |
|
660 | # and mark as garbage collected | |
655 | if (filename, node) not in self.keepkeys and self.isold( |
|
661 | if (filename, node) not in self.keepkeys and self.isold( | |
656 | self.repo, filename, node |
|
662 | self.repo, filename, node | |
657 | ): |
|
663 | ): | |
658 | entries[node].gced = True |
|
664 | entries[node].gced = True | |
659 | continue |
|
665 | continue | |
660 | neworderednodes.append(node) |
|
666 | neworderednodes.append(node) | |
661 | orderednodes = neworderednodes |
|
667 | orderednodes = neworderednodes | |
662 |
|
668 | |||
663 | # Compute delta bases for nodes: |
|
669 | # Compute delta bases for nodes: | |
664 | deltabases = {} |
|
670 | deltabases = {} | |
665 | nobase = set() |
|
671 | nobase = set() | |
666 | referenced = set() |
|
672 | referenced = set() | |
667 | nodes = set(nodes) |
|
673 | nodes = set(nodes) | |
668 | processprogress = ui.makeprogress( |
|
674 | processprogress = ui.makeprogress( | |
669 | _(b"processing nodes"), unit=b'nodes', total=len(orderednodes) |
|
675 | _(b"processing nodes"), unit=b'nodes', total=len(orderednodes) | |
670 | ) |
|
676 | ) | |
671 | for i, node in enumerate(orderednodes): |
|
677 | for i, node in enumerate(orderednodes): | |
672 | processprogress.update(i) |
|
678 | processprogress.update(i) | |
673 | # Find delta base |
|
679 | # Find delta base | |
674 | # TODO: allow delta'ing against most recent descendant instead |
|
680 | # TODO: allow delta'ing against most recent descendant instead | |
675 | # of immediate child |
|
681 | # of immediate child | |
676 | deltatuple = deltabases.get(node, None) |
|
682 | deltatuple = deltabases.get(node, None) | |
677 | if deltatuple is None: |
|
683 | if deltatuple is None: | |
678 | deltabase, chainlen = nullid, 0 |
|
684 | deltabase, chainlen = nullid, 0 | |
679 | deltabases[node] = (nullid, 0) |
|
685 | deltabases[node] = (nullid, 0) | |
680 | nobase.add(node) |
|
686 | nobase.add(node) | |
681 | else: |
|
687 | else: | |
682 | deltabase, chainlen = deltatuple |
|
688 | deltabase, chainlen = deltatuple | |
683 | referenced.add(deltabase) |
|
689 | referenced.add(deltabase) | |
684 |
|
690 | |||
685 | # Use available ancestor information to inform our delta choices |
|
691 | # Use available ancestor information to inform our delta choices | |
686 | ancestorinfo = ancestors.get(node) |
|
692 | ancestorinfo = ancestors.get(node) | |
687 | if ancestorinfo: |
|
693 | if ancestorinfo: | |
688 | p1, p2, linknode, copyfrom = ancestorinfo |
|
694 | p1, p2, linknode, copyfrom = ancestorinfo | |
689 |
|
695 | |||
690 | # The presence of copyfrom means we're at a point where the |
|
696 | # The presence of copyfrom means we're at a point where the | |
691 | # file was copied from elsewhere. So don't attempt to do any |
|
697 | # file was copied from elsewhere. So don't attempt to do any | |
692 | # deltas with the other file. |
|
698 | # deltas with the other file. | |
693 | if copyfrom: |
|
699 | if copyfrom: | |
694 | p1 = nullid |
|
700 | p1 = nullid | |
695 |
|
701 | |||
696 | if chainlen < maxchainlen: |
|
702 | if chainlen < maxchainlen: | |
697 | # Record this child as the delta base for its parents. |
|
703 | # Record this child as the delta base for its parents. | |
698 | # This may be non-optimal, since the parents may have |
|
704 | # This may be non-optimal, since the parents may have | |
699 | # many children, and this will only choose the last one. |
|
705 | # many children, and this will only choose the last one. | |
700 | # TODO: record all children and try all deltas to find |
|
706 | # TODO: record all children and try all deltas to find | |
701 | # best |
|
707 | # best | |
702 | if p1 != nullid: |
|
708 | if p1 != nullid: | |
703 | deltabases[p1] = (node, chainlen + 1) |
|
709 | deltabases[p1] = (node, chainlen + 1) | |
704 | if p2 != nullid: |
|
710 | if p2 != nullid: | |
705 | deltabases[p2] = (node, chainlen + 1) |
|
711 | deltabases[p2] = (node, chainlen + 1) | |
706 |
|
712 | |||
707 | # experimental config: repack.chainorphansbysize |
|
713 | # experimental config: repack.chainorphansbysize | |
708 | if ui.configbool(b'repack', b'chainorphansbysize'): |
|
714 | if ui.configbool(b'repack', b'chainorphansbysize'): | |
709 | orphans = nobase - referenced |
|
715 | orphans = nobase - referenced | |
710 | orderednodes = self._chainorphans( |
|
716 | orderednodes = self._chainorphans( | |
711 | ui, filename, orderednodes, orphans, deltabases |
|
717 | ui, filename, orderednodes, orphans, deltabases | |
712 | ) |
|
718 | ) | |
713 |
|
719 | |||
714 | # Compute deltas and write to the pack |
|
720 | # Compute deltas and write to the pack | |
715 | for i, node in enumerate(orderednodes): |
|
721 | for i, node in enumerate(orderednodes): | |
716 | deltabase, chainlen = deltabases[node] |
|
722 | deltabase, chainlen = deltabases[node] | |
717 | # Compute delta |
|
723 | # Compute delta | |
718 | # TODO: Optimize the deltachain fetching. Since we're |
|
724 | # TODO: Optimize the deltachain fetching. Since we're | |
719 | # iterating over the different versions of the file, we may |
|
725 | # iterating over the different versions of the file, we may | |
720 | # be fetching the same deltachain over and over again. |
|
726 | # be fetching the same deltachain over and over again. | |
721 | if deltabase != nullid: |
|
727 | if deltabase != nullid: | |
722 | deltaentry = self.data.getdelta(filename, node) |
|
728 | deltaentry = self.data.getdelta(filename, node) | |
723 | delta, deltabasename, origdeltabase, meta = deltaentry |
|
729 | delta, deltabasename, origdeltabase, meta = deltaentry | |
724 | size = meta.get(constants.METAKEYSIZE) |
|
730 | size = meta.get(constants.METAKEYSIZE) | |
725 | if ( |
|
731 | if ( | |
726 | deltabasename != filename |
|
732 | deltabasename != filename | |
727 | or origdeltabase != deltabase |
|
733 | or origdeltabase != deltabase | |
728 | or size is None |
|
734 | or size is None | |
729 | ): |
|
735 | ): | |
730 | deltabasetext = self.data.get(filename, deltabase) |
|
736 | deltabasetext = self.data.get(filename, deltabase) | |
731 | original = self.data.get(filename, node) |
|
737 | original = self.data.get(filename, node) | |
732 | size = len(original) |
|
738 | size = len(original) | |
733 | delta = mdiff.textdiff(deltabasetext, original) |
|
739 | delta = mdiff.textdiff(deltabasetext, original) | |
734 | else: |
|
740 | else: | |
735 | delta = self.data.get(filename, node) |
|
741 | delta = self.data.get(filename, node) | |
736 | size = len(delta) |
|
742 | size = len(delta) | |
737 | meta = self.data.getmeta(filename, node) |
|
743 | meta = self.data.getmeta(filename, node) | |
738 |
|
744 | |||
739 | # TODO: don't use the delta if it's larger than the fulltext |
|
745 | # TODO: don't use the delta if it's larger than the fulltext | |
740 | if constants.METAKEYSIZE not in meta: |
|
746 | if constants.METAKEYSIZE not in meta: | |
741 | meta[constants.METAKEYSIZE] = size |
|
747 | meta[constants.METAKEYSIZE] = size | |
742 | target.add(filename, node, deltabase, delta, meta) |
|
748 | target.add(filename, node, deltabase, delta, meta) | |
743 |
|
749 | |||
744 | entries[node].datarepacked = True |
|
750 | entries[node].datarepacked = True | |
745 |
|
751 | |||
746 | processprogress.complete() |
|
752 | processprogress.complete() | |
747 | count += 1 |
|
753 | count += 1 | |
748 |
|
754 | |||
749 | repackprogress.complete() |
|
755 | repackprogress.complete() | |
750 | target.close(ledger=ledger) |
|
756 | target.close(ledger=ledger) | |
751 |
|
757 | |||
752 | def repackhistory(self, ledger, target): |
|
758 | def repackhistory(self, ledger, target): | |
753 | ui = self.repo.ui |
|
759 | ui = self.repo.ui | |
754 |
|
760 | |||
755 | byfile = {} |
|
761 | byfile = {} | |
756 | for entry in pycompat.itervalues(ledger.entries): |
|
762 | for entry in pycompat.itervalues(ledger.entries): | |
757 | if entry.historysource: |
|
763 | if entry.historysource: | |
758 | byfile.setdefault(entry.filename, {})[entry.node] = entry |
|
764 | byfile.setdefault(entry.filename, {})[entry.node] = entry | |
759 |
|
765 | |||
760 | progress = ui.makeprogress( |
|
766 | progress = ui.makeprogress( | |
761 | _(b"repacking history"), unit=self.unit, total=len(byfile) |
|
767 | _(b"repacking history"), unit=self.unit, total=len(byfile) | |
762 | ) |
|
768 | ) | |
763 | for filename, entries in sorted(pycompat.iteritems(byfile)): |
|
769 | for filename, entries in sorted(pycompat.iteritems(byfile)): | |
764 | ancestors = {} |
|
770 | ancestors = {} | |
765 | nodes = list(node for node in entries) |
|
771 | nodes = list(node for node in entries) | |
766 |
|
772 | |||
767 | for node in nodes: |
|
773 | for node in nodes: | |
768 | if node in ancestors: |
|
774 | if node in ancestors: | |
769 | continue |
|
775 | continue | |
770 | ancestors.update( |
|
776 | ancestors.update( | |
771 | self.history.getancestors(filename, node, known=ancestors) |
|
777 | self.history.getancestors(filename, node, known=ancestors) | |
772 | ) |
|
778 | ) | |
773 |
|
779 | |||
774 | # Order the nodes children first |
|
780 | # Order the nodes children first | |
775 | orderednodes = reversed(self._toposort(ancestors)) |
|
781 | orderednodes = reversed(self._toposort(ancestors)) | |
776 |
|
782 | |||
777 | # Write to the pack |
|
783 | # Write to the pack | |
778 | dontprocess = set() |
|
784 | dontprocess = set() | |
779 | for node in orderednodes: |
|
785 | for node in orderednodes: | |
780 | p1, p2, linknode, copyfrom = ancestors[node] |
|
786 | p1, p2, linknode, copyfrom = ancestors[node] | |
781 |
|
787 | |||
782 | # If the node is marked dontprocess, but it's also in the |
|
788 | # If the node is marked dontprocess, but it's also in the | |
783 | # explicit entries set, that means the node exists both in this |
|
789 | # explicit entries set, that means the node exists both in this | |
784 | # file and in another file that was copied to this file. |
|
790 | # file and in another file that was copied to this file. | |
785 | # Usually this happens if the file was copied to another file, |
|
791 | # Usually this happens if the file was copied to another file, | |
786 | # then the copy was deleted, then reintroduced without copy |
|
792 | # then the copy was deleted, then reintroduced without copy | |
787 | # metadata. The original add and the new add have the same hash |
|
793 | # metadata. The original add and the new add have the same hash | |
788 | # since the content is identical and the parents are null. |
|
794 | # since the content is identical and the parents are null. | |
789 | if node in dontprocess and node not in entries: |
|
795 | if node in dontprocess and node not in entries: | |
790 | # If copyfrom == filename, it means the copy history |
|
796 | # If copyfrom == filename, it means the copy history | |
791 | # went to some other file, then came back to this one, so we |
|
797 | # went to some other file, then came back to this one, so we | |
792 | # should continue processing it. |
|
798 | # should continue processing it. | |
793 | if p1 != nullid and copyfrom != filename: |
|
799 | if p1 != nullid and copyfrom != filename: | |
794 | dontprocess.add(p1) |
|
800 | dontprocess.add(p1) | |
795 | if p2 != nullid: |
|
801 | if p2 != nullid: | |
796 | dontprocess.add(p2) |
|
802 | dontprocess.add(p2) | |
797 | continue |
|
803 | continue | |
798 |
|
804 | |||
799 | if copyfrom: |
|
805 | if copyfrom: | |
800 | dontprocess.add(p1) |
|
806 | dontprocess.add(p1) | |
801 |
|
807 | |||
802 | target.add(filename, node, p1, p2, linknode, copyfrom) |
|
808 | target.add(filename, node, p1, p2, linknode, copyfrom) | |
803 |
|
809 | |||
804 | if node in entries: |
|
810 | if node in entries: | |
805 | entries[node].historyrepacked = True |
|
811 | entries[node].historyrepacked = True | |
806 |
|
812 | |||
807 | progress.increment() |
|
813 | progress.increment() | |
808 |
|
814 | |||
809 | progress.complete() |
|
815 | progress.complete() | |
810 | target.close(ledger=ledger) |
|
816 | target.close(ledger=ledger) | |
811 |
|
817 | |||
812 | def _toposort(self, ancestors): |
|
818 | def _toposort(self, ancestors): | |
813 | def parentfunc(node): |
|
819 | def parentfunc(node): | |
814 | p1, p2, linknode, copyfrom = ancestors[node] |
|
820 | p1, p2, linknode, copyfrom = ancestors[node] | |
815 | parents = [] |
|
821 | parents = [] | |
816 | if p1 != nullid: |
|
822 | if p1 != nullid: | |
817 | parents.append(p1) |
|
823 | parents.append(p1) | |
818 | if p2 != nullid: |
|
824 | if p2 != nullid: | |
819 | parents.append(p2) |
|
825 | parents.append(p2) | |
820 | return parents |
|
826 | return parents | |
821 |
|
827 | |||
822 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) |
|
828 | sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc) | |
823 | return sortednodes |
|
829 | return sortednodes | |
824 |
|
830 | |||
825 |
|
831 | |||
826 | class repackledger(object): |
|
832 | class repackledger(object): | |
827 | """Storage for all the bookkeeping that happens during a repack. It contains |
|
833 | """Storage for all the bookkeeping that happens during a repack. It contains | |
828 | the list of revisions being repacked, what happened to each revision, and |
|
834 | the list of revisions being repacked, what happened to each revision, and | |
829 | which source store contained which revision originally (for later cleanup). |
|
835 | which source store contained which revision originally (for later cleanup). | |
830 | """ |
|
836 | """ | |
831 |
|
837 | |||
832 | def __init__(self): |
|
838 | def __init__(self): | |
833 | self.entries = {} |
|
839 | self.entries = {} | |
834 | self.sources = {} |
|
840 | self.sources = {} | |
835 | self.created = set() |
|
841 | self.created = set() | |
836 |
|
842 | |||
837 | def markdataentry(self, source, filename, node): |
|
843 | def markdataentry(self, source, filename, node): | |
838 | """Mark the given filename+node revision as having a data rev in the |
|
844 | """Mark the given filename+node revision as having a data rev in the | |
839 | given source. |
|
845 | given source. | |
840 | """ |
|
846 | """ | |
841 | entry = self._getorcreateentry(filename, node) |
|
847 | entry = self._getorcreateentry(filename, node) | |
842 | entry.datasource = True |
|
848 | entry.datasource = True | |
843 | entries = self.sources.get(source) |
|
849 | entries = self.sources.get(source) | |
844 | if not entries: |
|
850 | if not entries: | |
845 | entries = set() |
|
851 | entries = set() | |
846 | self.sources[source] = entries |
|
852 | self.sources[source] = entries | |
847 | entries.add(entry) |
|
853 | entries.add(entry) | |
848 |
|
854 | |||
849 | def markhistoryentry(self, source, filename, node): |
|
855 | def markhistoryentry(self, source, filename, node): | |
850 | """Mark the given filename+node revision as having a history rev in the |
|
856 | """Mark the given filename+node revision as having a history rev in the | |
851 | given source. |
|
857 | given source. | |
852 | """ |
|
858 | """ | |
853 | entry = self._getorcreateentry(filename, node) |
|
859 | entry = self._getorcreateentry(filename, node) | |
854 | entry.historysource = True |
|
860 | entry.historysource = True | |
855 | entries = self.sources.get(source) |
|
861 | entries = self.sources.get(source) | |
856 | if not entries: |
|
862 | if not entries: | |
857 | entries = set() |
|
863 | entries = set() | |
858 | self.sources[source] = entries |
|
864 | self.sources[source] = entries | |
859 | entries.add(entry) |
|
865 | entries.add(entry) | |
860 |
|
866 | |||
861 | def _getorcreateentry(self, filename, node): |
|
867 | def _getorcreateentry(self, filename, node): | |
862 | key = (filename, node) |
|
868 | key = (filename, node) | |
863 | value = self.entries.get(key) |
|
869 | value = self.entries.get(key) | |
864 | if not value: |
|
870 | if not value: | |
865 | value = repackentry(filename, node) |
|
871 | value = repackentry(filename, node) | |
866 | self.entries[key] = value |
|
872 | self.entries[key] = value | |
867 |
|
873 | |||
868 | return value |
|
874 | return value | |
869 |
|
875 | |||
870 | def addcreated(self, value): |
|
876 | def addcreated(self, value): | |
871 | self.created.add(value) |
|
877 | self.created.add(value) | |
872 |
|
878 | |||
873 |
|
879 | |||
874 | class repackentry(object): |
|
880 | class repackentry(object): | |
875 | """Simple class representing a single revision entry in the repackledger. |
|
881 | """Simple class representing a single revision entry in the repackledger. | |
876 | """ |
|
882 | """ | |
877 |
|
883 | |||
878 | __slots__ = ( |
|
884 | __slots__ = ( | |
879 | r'filename', |
|
885 | r'filename', | |
880 | r'node', |
|
886 | r'node', | |
881 | r'datasource', |
|
887 | r'datasource', | |
882 | r'historysource', |
|
888 | r'historysource', | |
883 | r'datarepacked', |
|
889 | r'datarepacked', | |
884 | r'historyrepacked', |
|
890 | r'historyrepacked', | |
885 | r'gced', |
|
891 | r'gced', | |
886 | ) |
|
892 | ) | |
887 |
|
893 | |||
888 | def __init__(self, filename, node): |
|
894 | def __init__(self, filename, node): | |
889 | self.filename = filename |
|
895 | self.filename = filename | |
890 | self.node = node |
|
896 | self.node = node | |
891 | # If the revision has a data entry in the source |
|
897 | # If the revision has a data entry in the source | |
892 | self.datasource = False |
|
898 | self.datasource = False | |
893 | # If the revision has a history entry in the source |
|
899 | # If the revision has a history entry in the source | |
894 | self.historysource = False |
|
900 | self.historysource = False | |
895 | # If the revision's data entry was repacked into the repack target |
|
901 | # If the revision's data entry was repacked into the repack target | |
896 | self.datarepacked = False |
|
902 | self.datarepacked = False | |
897 | # If the revision's history entry was repacked into the repack target |
|
903 | # If the revision's history entry was repacked into the repack target | |
898 | self.historyrepacked = False |
|
904 | self.historyrepacked = False | |
899 | # If garbage collected |
|
905 | # If garbage collected | |
900 | self.gced = False |
|
906 | self.gced = False | |
901 |
|
907 | |||
902 |
|
908 | |||
903 | def repacklockvfs(repo): |
|
909 | def repacklockvfs(repo): | |
904 | if util.safehasattr(repo, 'name'): |
|
910 | if util.safehasattr(repo, 'name'): | |
905 | # Lock in the shared cache so repacks across multiple copies of the same |
|
911 | # Lock in the shared cache so repacks across multiple copies of the same | |
906 | # repo are coordinated. |
|
912 | # repo are coordinated. | |
907 | sharedcachepath = shallowutil.getcachepackpath( |
|
913 | sharedcachepath = shallowutil.getcachepackpath( | |
908 | repo, constants.FILEPACK_CATEGORY |
|
914 | repo, constants.FILEPACK_CATEGORY | |
909 | ) |
|
915 | ) | |
910 | return vfs.vfs(sharedcachepath) |
|
916 | return vfs.vfs(sharedcachepath) | |
911 | else: |
|
917 | else: | |
912 | return repo.svfs |
|
918 | return repo.svfs |
@@ -1,354 +1,358 b'' | |||||
1 | # shallowrepo.py - shallow repository that uses remote filelogs |
|
1 | # shallowrepo.py - shallow repository that uses remote filelogs | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import os |
|
9 | import os | |
10 |
|
10 | |||
11 | from mercurial.i18n import _ |
|
11 | from mercurial.i18n import _ | |
12 | from mercurial.node import hex, nullid, nullrev |
|
12 | from mercurial.node import hex, nullid, nullrev | |
13 | from mercurial import ( |
|
13 | from mercurial import ( | |
14 | encoding, |
|
14 | encoding, | |
15 | error, |
|
15 | error, | |
16 | localrepo, |
|
16 | localrepo, | |
17 | match, |
|
17 | match, | |
18 | pycompat, |
|
18 | pycompat, | |
19 | scmutil, |
|
19 | scmutil, | |
20 | sparse, |
|
20 | sparse, | |
21 | util, |
|
21 | util, | |
22 | ) |
|
22 | ) | |
23 | from mercurial.utils import procutil |
|
23 | from mercurial.utils import procutil | |
24 | from . import ( |
|
24 | from . import ( | |
25 | connectionpool, |
|
25 | connectionpool, | |
26 | constants, |
|
26 | constants, | |
27 | contentstore, |
|
27 | contentstore, | |
28 | datapack, |
|
28 | datapack, | |
29 | fileserverclient, |
|
29 | fileserverclient, | |
30 | historypack, |
|
30 | historypack, | |
31 | metadatastore, |
|
31 | metadatastore, | |
32 | remotefilectx, |
|
32 | remotefilectx, | |
33 | remotefilelog, |
|
33 | remotefilelog, | |
34 | shallowutil, |
|
34 | shallowutil, | |
35 | ) |
|
35 | ) | |
36 |
|
36 | |||
37 | # These make*stores functions are global so that other extensions can replace |
|
37 | # These make*stores functions are global so that other extensions can replace | |
38 | # them. |
|
38 | # them. | |
39 | def makelocalstores(repo): |
|
39 | def makelocalstores(repo): | |
40 | """In-repo stores, like .hg/store/data; can not be discarded.""" |
|
40 | """In-repo stores, like .hg/store/data; can not be discarded.""" | |
41 | localpath = os.path.join(repo.svfs.vfs.base, b'data') |
|
41 | localpath = os.path.join(repo.svfs.vfs.base, b'data') | |
42 | if not os.path.exists(localpath): |
|
42 | if not os.path.exists(localpath): | |
43 | os.makedirs(localpath) |
|
43 | os.makedirs(localpath) | |
44 |
|
44 | |||
45 | # Instantiate local data stores |
|
45 | # Instantiate local data stores | |
46 | localcontent = contentstore.remotefilelogcontentstore( |
|
46 | localcontent = contentstore.remotefilelogcontentstore( | |
47 | repo, localpath, repo.name, shared=False |
|
47 | repo, localpath, repo.name, shared=False | |
48 | ) |
|
48 | ) | |
49 | localmetadata = metadatastore.remotefilelogmetadatastore( |
|
49 | localmetadata = metadatastore.remotefilelogmetadatastore( | |
50 | repo, localpath, repo.name, shared=False |
|
50 | repo, localpath, repo.name, shared=False | |
51 | ) |
|
51 | ) | |
52 | return localcontent, localmetadata |
|
52 | return localcontent, localmetadata | |
53 |
|
53 | |||
54 |
|
54 | |||
55 | def makecachestores(repo): |
|
55 | def makecachestores(repo): | |
56 | """Typically machine-wide, cache of remote data; can be discarded.""" |
|
56 | """Typically machine-wide, cache of remote data; can be discarded.""" | |
57 | # Instantiate shared cache stores |
|
57 | # Instantiate shared cache stores | |
58 | cachepath = shallowutil.getcachepath(repo.ui) |
|
58 | cachepath = shallowutil.getcachepath(repo.ui) | |
59 | cachecontent = contentstore.remotefilelogcontentstore( |
|
59 | cachecontent = contentstore.remotefilelogcontentstore( | |
60 | repo, cachepath, repo.name, shared=True |
|
60 | repo, cachepath, repo.name, shared=True | |
61 | ) |
|
61 | ) | |
62 | cachemetadata = metadatastore.remotefilelogmetadatastore( |
|
62 | cachemetadata = metadatastore.remotefilelogmetadatastore( | |
63 | repo, cachepath, repo.name, shared=True |
|
63 | repo, cachepath, repo.name, shared=True | |
64 | ) |
|
64 | ) | |
65 |
|
65 | |||
66 | repo.sharedstore = cachecontent |
|
66 | repo.sharedstore = cachecontent | |
67 | repo.shareddatastores.append(cachecontent) |
|
67 | repo.shareddatastores.append(cachecontent) | |
68 | repo.sharedhistorystores.append(cachemetadata) |
|
68 | repo.sharedhistorystores.append(cachemetadata) | |
69 |
|
69 | |||
70 | return cachecontent, cachemetadata |
|
70 | return cachecontent, cachemetadata | |
71 |
|
71 | |||
72 |
|
72 | |||
73 | def makeremotestores(repo, cachecontent, cachemetadata): |
|
73 | def makeremotestores(repo, cachecontent, cachemetadata): | |
74 | """These stores fetch data from a remote server.""" |
|
74 | """These stores fetch data from a remote server.""" | |
75 | # Instantiate remote stores |
|
75 | # Instantiate remote stores | |
76 | repo.fileservice = fileserverclient.fileserverclient(repo) |
|
76 | repo.fileservice = fileserverclient.fileserverclient(repo) | |
77 | remotecontent = contentstore.remotecontentstore( |
|
77 | remotecontent = contentstore.remotecontentstore( | |
78 | repo.ui, repo.fileservice, cachecontent |
|
78 | repo.ui, repo.fileservice, cachecontent | |
79 | ) |
|
79 | ) | |
80 | remotemetadata = metadatastore.remotemetadatastore( |
|
80 | remotemetadata = metadatastore.remotemetadatastore( | |
81 | repo.ui, repo.fileservice, cachemetadata |
|
81 | repo.ui, repo.fileservice, cachemetadata | |
82 | ) |
|
82 | ) | |
83 | return remotecontent, remotemetadata |
|
83 | return remotecontent, remotemetadata | |
84 |
|
84 | |||
85 |
|
85 | |||
86 | def makepackstores(repo): |
|
86 | def makepackstores(repo): | |
87 | """Packs are more efficient (to read from) cache stores.""" |
|
87 | """Packs are more efficient (to read from) cache stores.""" | |
88 | # Instantiate pack stores |
|
88 | # Instantiate pack stores | |
89 | packpath = shallowutil.getcachepackpath(repo, constants.FILEPACK_CATEGORY) |
|
89 | packpath = shallowutil.getcachepackpath(repo, constants.FILEPACK_CATEGORY) | |
90 | packcontentstore = datapack.datapackstore(repo.ui, packpath) |
|
90 | packcontentstore = datapack.datapackstore(repo.ui, packpath) | |
91 | packmetadatastore = historypack.historypackstore(repo.ui, packpath) |
|
91 | packmetadatastore = historypack.historypackstore(repo.ui, packpath) | |
92 |
|
92 | |||
93 | repo.shareddatastores.append(packcontentstore) |
|
93 | repo.shareddatastores.append(packcontentstore) | |
94 | repo.sharedhistorystores.append(packmetadatastore) |
|
94 | repo.sharedhistorystores.append(packmetadatastore) | |
95 | shallowutil.reportpackmetrics( |
|
95 | shallowutil.reportpackmetrics( | |
96 | repo.ui, b'filestore', packcontentstore, packmetadatastore |
|
96 | repo.ui, b'filestore', packcontentstore, packmetadatastore | |
97 | ) |
|
97 | ) | |
98 | return packcontentstore, packmetadatastore |
|
98 | return packcontentstore, packmetadatastore | |
99 |
|
99 | |||
100 |
|
100 | |||
101 | def makeunionstores(repo): |
|
101 | def makeunionstores(repo): | |
102 | """Union stores iterate the other stores and return the first result.""" |
|
102 | """Union stores iterate the other stores and return the first result.""" | |
103 | repo.shareddatastores = [] |
|
103 | repo.shareddatastores = [] | |
104 | repo.sharedhistorystores = [] |
|
104 | repo.sharedhistorystores = [] | |
105 |
|
105 | |||
106 | packcontentstore, packmetadatastore = makepackstores(repo) |
|
106 | packcontentstore, packmetadatastore = makepackstores(repo) | |
107 | cachecontent, cachemetadata = makecachestores(repo) |
|
107 | cachecontent, cachemetadata = makecachestores(repo) | |
108 | localcontent, localmetadata = makelocalstores(repo) |
|
108 | localcontent, localmetadata = makelocalstores(repo) | |
109 | remotecontent, remotemetadata = makeremotestores( |
|
109 | remotecontent, remotemetadata = makeremotestores( | |
110 | repo, cachecontent, cachemetadata |
|
110 | repo, cachecontent, cachemetadata | |
111 | ) |
|
111 | ) | |
112 |
|
112 | |||
113 | # Instantiate union stores |
|
113 | # Instantiate union stores | |
114 | repo.contentstore = contentstore.unioncontentstore( |
|
114 | repo.contentstore = contentstore.unioncontentstore( | |
115 | packcontentstore, |
|
115 | packcontentstore, | |
116 | cachecontent, |
|
116 | cachecontent, | |
117 | localcontent, |
|
117 | localcontent, | |
118 | remotecontent, |
|
118 | remotecontent, | |
119 | writestore=localcontent, |
|
119 | writestore=localcontent, | |
120 | ) |
|
120 | ) | |
121 | repo.metadatastore = metadatastore.unionmetadatastore( |
|
121 | repo.metadatastore = metadatastore.unionmetadatastore( | |
122 | packmetadatastore, |
|
122 | packmetadatastore, | |
123 | cachemetadata, |
|
123 | cachemetadata, | |
124 | localmetadata, |
|
124 | localmetadata, | |
125 | remotemetadata, |
|
125 | remotemetadata, | |
126 | writestore=localmetadata, |
|
126 | writestore=localmetadata, | |
127 | ) |
|
127 | ) | |
128 |
|
128 | |||
129 | fileservicedatawrite = cachecontent |
|
129 | fileservicedatawrite = cachecontent | |
130 | fileservicehistorywrite = cachemetadata |
|
130 | fileservicehistorywrite = cachemetadata | |
131 | repo.fileservice.setstore( |
|
131 | repo.fileservice.setstore( | |
132 | repo.contentstore, |
|
132 | repo.contentstore, | |
133 | repo.metadatastore, |
|
133 | repo.metadatastore, | |
134 | fileservicedatawrite, |
|
134 | fileservicedatawrite, | |
135 | fileservicehistorywrite, |
|
135 | fileservicehistorywrite, | |
136 | ) |
|
136 | ) | |
137 | shallowutil.reportpackmetrics( |
|
137 | shallowutil.reportpackmetrics( | |
138 | repo.ui, b'filestore', packcontentstore, packmetadatastore |
|
138 | repo.ui, b'filestore', packcontentstore, packmetadatastore | |
139 | ) |
|
139 | ) | |
140 |
|
140 | |||
141 |
|
141 | |||
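makeunionstores stitches the pack, cache, local and remote stores into a single unioned view: each lookup walks the stores in the order given and the first store that has the data wins, with the local store doubling as the write target (writestore=localcontent/localmetadata). A minimal sketch of that first-hit dispatch, using hypothetical class names rather than unioncontentstore/unionmetadatastore:

# Sketch of first-hit dispatch across several stores (illustrative names).
class DictStore:
    def __init__(self, data):
        self._data = data

    def get(self, key):
        return self._data[key]          # raises KeyError on a miss

class UnionStore:
    def __init__(self, *stores):
        self._stores = stores           # cheapest-to-read stores first

    def get(self, key):
        for store in self._stores:
            try:
                return store.get(key)
            except KeyError:
                continue
        raise KeyError(key)

packs = DictStore({b'a': b'packed copy'})
cache = DictStore({b'b': b'cached copy'})
union = UnionStore(packs, cache)
assert union.get(b'b') == b'cached copy'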
142 | def wraprepo(repo): |
|
142 | def wraprepo(repo): | |
143 | class shallowrepository(repo.__class__): |
|
143 | class shallowrepository(repo.__class__): | |
144 | @util.propertycache |
|
144 | @util.propertycache | |
145 | def name(self): |
|
145 | def name(self): | |
146 | return self.ui.config(b'remotefilelog', b'reponame') |
|
146 | return self.ui.config(b'remotefilelog', b'reponame') | |
147 |
|
147 | |||
148 | @util.propertycache |
|
148 | @util.propertycache | |
149 | def fallbackpath(self): |
|
149 | def fallbackpath(self): | |
150 | path = repo.ui.config( |
|
150 | path = repo.ui.config( | |
151 | b"remotefilelog", |
|
151 | b"remotefilelog", | |
152 | b"fallbackpath", |
|
152 | b"fallbackpath", | |
153 | repo.ui.config(b'paths', b'default'), |
|
153 | repo.ui.config(b'paths', b'default'), | |
154 | ) |
|
154 | ) | |
155 | if not path: |
|
155 | if not path: | |
156 | raise error.Abort( |
|
156 | raise error.Abort( | |
157 | b"no remotefilelog server " |
|
157 | b"no remotefilelog server " | |
158 | b"configured - is your .hg/hgrc trusted?" |
|
158 | b"configured - is your .hg/hgrc trusted?" | |
159 | ) |
|
159 | ) | |
160 |
|
160 | |||
161 | return path |
|
161 | return path | |
162 |
|
162 | |||
163 | def maybesparsematch(self, *revs, **kwargs): |
|
163 | def maybesparsematch(self, *revs, **kwargs): | |
164 | ''' |
|
164 | ''' | |
165 | A wrapper that allows the remotefilelog to invoke sparsematch() if |
|
165 | A wrapper that allows the remotefilelog to invoke sparsematch() if | |
166 | this is a sparse repository, or returns None if this is not a |
|
166 | this is a sparse repository, or returns None if this is not a | |
167 | sparse repository. |
|
167 | sparse repository. | |
168 | ''' |
|
168 | ''' | |
169 | if revs: |
|
169 | if revs: | |
170 | ret = sparse.matcher(repo, revs=revs) |
|
170 | ret = sparse.matcher(repo, revs=revs) | |
171 | else: |
|
171 | else: | |
172 | ret = sparse.matcher(repo) |
|
172 | ret = sparse.matcher(repo) | |
173 |
|
173 | |||
174 | if ret.always(): |
|
174 | if ret.always(): | |
175 | return None |
|
175 | return None | |
176 | return ret |
|
176 | return ret | |
177 |
|
177 | |||
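maybesparsematch above returns None when the sparse matcher would match everything, so callers can skip per-file filtering entirely instead of paying for a no-op match on every path. A small sketch of that "None means no filtering" convention, using a plain predicate instead of Mercurial's match objects:

# Illustrative only: return None when there is nothing to filter, a matcher
# otherwise, so callers can branch cheaply.
def maybe_matcher(allowed_paths):
    if not allowed_paths:           # nothing configured: everything matches
        return None
    allowed = set(allowed_paths)
    return lambda path: path in allowed

paths = [b'a.py', b'docs/b.txt']
m = maybe_matcher([])               # everything matches -> None
wanted = paths if m is None else [p for p in paths if m(p)]
assert wanted == paths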
178 | def file(self, f): |
|
178 | def file(self, f): | |
179 | if f[0] == b'/': |
|
179 | if f[0] == b'/': | |
180 | f = f[1:] |
|
180 | f = f[1:] | |
181 |
|
181 | |||
182 | if self.shallowmatch(f): |
|
182 | if self.shallowmatch(f): | |
183 | return remotefilelog.remotefilelog(self.svfs, f, self) |
|
183 | return remotefilelog.remotefilelog(self.svfs, f, self) | |
184 | else: |
|
184 | else: | |
185 | return super(shallowrepository, self).file(f) |
|
185 | return super(shallowrepository, self).file(f) | |
186 |
|
186 | |||
187 | def filectx(self, path, *args, **kwargs): |
|
187 | def filectx(self, path, *args, **kwargs): | |
188 | if self.shallowmatch(path): |
|
188 | if self.shallowmatch(path): | |
189 | return remotefilectx.remotefilectx(self, path, *args, **kwargs) |
|
189 | return remotefilectx.remotefilectx(self, path, *args, **kwargs) | |
190 | else: |
|
190 | else: | |
191 | return super(shallowrepository, self).filectx( |
|
191 | return super(shallowrepository, self).filectx( | |
192 | path, *args, **kwargs |
|
192 | path, *args, **kwargs | |
193 | ) |
|
193 | ) | |
194 |
|
194 | |||
195 | @localrepo.unfilteredmethod |
|
195 | @localrepo.unfilteredmethod | |
196 | def commitctx(self, ctx, error=False, origctx=None): |
|
196 | def commitctx(self, ctx, error=False, origctx=None): | |
197 | """Add a new revision to current repository. |
|
197 | """Add a new revision to current repository. | |
198 | Revision information is passed via the context argument. |
|
198 | Revision information is passed via the context argument. | |
199 | """ |
|
199 | """ | |
200 |
|
200 | |||
201 | # some contexts already have manifest nodes, they don't need any |
|
201 | # some contexts already have manifest nodes, they don't need any | |
202 | # prefetching (for example if we're just editing a commit message |
|
202 | # prefetching (for example if we're just editing a commit message | |
203 | # we can reuse the manifest) |
|
203 | # we can reuse the manifest) | |
204 | if not ctx.manifestnode(): |
|
204 | if not ctx.manifestnode(): | |
205 | # prefetch files that will likely be compared |
|
205 | # prefetch files that will likely be compared | |
206 | m1 = ctx.p1().manifest() |
|
206 | m1 = ctx.p1().manifest() | |
207 | files = [] |
|
207 | files = [] | |
208 | for f in ctx.modified() + ctx.added(): |
|
208 | for f in ctx.modified() + ctx.added(): | |
209 | fparent1 = m1.get(f, nullid) |
|
209 | fparent1 = m1.get(f, nullid) | |
210 | if fparent1 != nullid: |
|
210 | if fparent1 != nullid: | |
211 | files.append((f, hex(fparent1))) |
|
211 | files.append((f, hex(fparent1))) | |
212 | self.fileservice.prefetch(files) |
|
212 | self.fileservice.prefetch(files) | |
213 | return super(shallowrepository, self).commitctx( |
|
213 | return super(shallowrepository, self).commitctx( | |
214 | ctx, error=error, origctx=origctx |
|
214 | ctx, error=error, origctx=origctx | |
215 | ) |
|
215 | ) | |
216 |
|
216 | |||
217 | def backgroundprefetch( |
|
217 | def backgroundprefetch( | |
218 | self, |
|
218 | self, | |
219 | revs, |
|
219 | revs, | |
220 | base=None, |
|
220 | base=None, | |
221 | repack=False, |
|
221 | repack=False, | |
222 | pats=None, |
|
222 | pats=None, | |
223 | opts=None, |
|
223 | opts=None, | |
224 | ensurestart=False, |
|
224 | ensurestart=False, | |
225 | ): |
|
225 | ): | |
226 | """Runs prefetch in background with optional repack |
|
226 | """Runs prefetch in background with optional repack | |
227 | """ |
|
227 | """ | |
228 | cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'prefetch'] |
|
228 | cmd = [procutil.hgexecutable(), b'-R', repo.origroot, b'prefetch'] | |
229 | if repack: |
|
229 | if repack: | |
230 | cmd.append(b'--repack') |
|
230 | cmd.append(b'--repack') | |
231 | if revs: |
|
231 | if revs: | |
232 | cmd += [b'-r', revs] |
|
232 | cmd += [b'-r', revs] | |
233 | # We know this command will find a binary, so don't block |
|
233 | # We know this command will find a binary, so don't block | |
234 | # on it starting. |
|
234 | # on it starting. | |
|
235 | kwargs = {} | |||
|
236 | if repo.ui.configbool(b'devel', b'remotefilelog.bg-wait'): | |||
|
237 | kwargs['record_wait'] = repo.ui.atexit | |||
|
238 | ||||
235 | procutil.runbgcommand( |
|
239 | procutil.runbgcommand( | |
236 | cmd, encoding.environ, ensurestart=ensurestart |
|
240 | cmd, encoding.environ, ensurestart=ensurestart, **kwargs | |
237 | ) |
|
241 | ) | |
238 |
|
242 | |||
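The added lines in this hunk are the only behavioural change in the file: when the devel knob remotefilelog.bg-wait is set, repo.ui.atexit is passed to procutil.runbgcommand as record_wait, which appears intended to let tests wait for the background prefetch before the main process exits. The keyword is collected in a kwargs dict so nothing extra is passed when the knob is off. A sketch of that conditional-keyword pattern with a stand-in runner (the stand-in is not procutil.runbgcommand and its signature is assumed):

# Stand-in runner, illustrating the pattern only.
def run_background(cmd, env, ensurestart=False, record_wait=None):
    if record_wait is not None:
        record_wait(lambda: None)   # register a "wait for the child" callback
    return '[would start %r]' % (cmd,)

def launch(cmd, env, wait_in_tests=False):
    kwargs = {}
    if wait_in_tests:               # mirrors the devel.remotefilelog.bg-wait check
        kwargs['record_wait'] = lambda waiter: waiter()
    return run_background(cmd, env, ensurestart=False, **kwargs)

launch(['hg', 'prefetch'], {})                      # default: nothing extra passed
launch(['hg', 'prefetch'], {}, wait_in_tests=True)  # record_wait supplied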
239 | def prefetch(self, revs, base=None, pats=None, opts=None): |
|
243 | def prefetch(self, revs, base=None, pats=None, opts=None): | |
240 | """Prefetches all the necessary file revisions for the given revs |
|
244 | """Prefetches all the necessary file revisions for the given revs | |
241 | Optionally runs repack in background |
|
245 | Optionally runs repack in background | |
242 | """ |
|
246 | """ | |
243 | with repo._lock( |
|
247 | with repo._lock( | |
244 | repo.svfs, |
|
248 | repo.svfs, | |
245 | b'prefetchlock', |
|
249 | b'prefetchlock', | |
246 | True, |
|
250 | True, | |
247 | None, |
|
251 | None, | |
248 | None, |
|
252 | None, | |
249 | _(b'prefetching in %s') % repo.origroot, |
|
253 | _(b'prefetching in %s') % repo.origroot, | |
250 | ): |
|
254 | ): | |
251 | self._prefetch(revs, base, pats, opts) |
|
255 | self._prefetch(revs, base, pats, opts) | |
252 |
|
256 | |||
253 | def _prefetch(self, revs, base=None, pats=None, opts=None): |
|
257 | def _prefetch(self, revs, base=None, pats=None, opts=None): | |
254 | fallbackpath = self.fallbackpath |
|
258 | fallbackpath = self.fallbackpath | |
255 | if fallbackpath: |
|
259 | if fallbackpath: | |
256 | # If we know a rev is on the server, we should fetch the server |
|
260 | # If we know a rev is on the server, we should fetch the server | |
257 | # version of those files, since our local file versions might |
|
261 | # version of those files, since our local file versions might | |
258 | # become obsolete if the local commits are stripped. |
|
262 | # become obsolete if the local commits are stripped. | |
259 | localrevs = repo.revs(b'outgoing(%s)', fallbackpath) |
|
263 | localrevs = repo.revs(b'outgoing(%s)', fallbackpath) | |
260 | if base is not None and base != nullrev: |
|
264 | if base is not None and base != nullrev: | |
261 | serverbase = list( |
|
265 | serverbase = list( | |
262 | repo.revs( |
|
266 | repo.revs( | |
263 | b'first(reverse(::%s) - %ld)', base, localrevs |
|
267 | b'first(reverse(::%s) - %ld)', base, localrevs | |
264 | ) |
|
268 | ) | |
265 | ) |
|
269 | ) | |
266 | if serverbase: |
|
270 | if serverbase: | |
267 | base = serverbase[0] |
|
271 | base = serverbase[0] | |
268 | else: |
|
272 | else: | |
269 | localrevs = repo |
|
273 | localrevs = repo | |
270 |
|
274 | |||
271 | mfl = repo.manifestlog |
|
275 | mfl = repo.manifestlog | |
272 | mfrevlog = mfl.getstorage(b'') |
|
276 | mfrevlog = mfl.getstorage(b'') | |
273 | if base is not None: |
|
277 | if base is not None: | |
274 | mfdict = mfl[repo[base].manifestnode()].read() |
|
278 | mfdict = mfl[repo[base].manifestnode()].read() | |
275 | skip = set(pycompat.iteritems(mfdict)) |
|
279 | skip = set(pycompat.iteritems(mfdict)) | |
276 | else: |
|
280 | else: | |
277 | skip = set() |
|
281 | skip = set() | |
278 |
|
282 | |||
279 | # Copy the skip set to start large and avoid constant resizing, |
|
283 | # Copy the skip set to start large and avoid constant resizing, | |
280 | # and since it's likely to be very similar to the prefetch set. |
|
284 | # and since it's likely to be very similar to the prefetch set. | |
281 | files = skip.copy() |
|
285 | files = skip.copy() | |
282 | serverfiles = skip.copy() |
|
286 | serverfiles = skip.copy() | |
283 | visited = set() |
|
287 | visited = set() | |
284 | visited.add(nullrev) |
|
288 | visited.add(nullrev) | |
285 | revcount = len(revs) |
|
289 | revcount = len(revs) | |
286 | progress = self.ui.makeprogress(_(b'prefetching'), total=revcount) |
|
290 | progress = self.ui.makeprogress(_(b'prefetching'), total=revcount) | |
287 | progress.update(0) |
|
291 | progress.update(0) | |
288 | for rev in sorted(revs): |
|
292 | for rev in sorted(revs): | |
289 | ctx = repo[rev] |
|
293 | ctx = repo[rev] | |
290 | if pats: |
|
294 | if pats: | |
291 | m = scmutil.match(ctx, pats, opts) |
|
295 | m = scmutil.match(ctx, pats, opts) | |
292 | sparsematch = repo.maybesparsematch(rev) |
|
296 | sparsematch = repo.maybesparsematch(rev) | |
293 |
|
297 | |||
294 | mfnode = ctx.manifestnode() |
|
298 | mfnode = ctx.manifestnode() | |
295 | mfrev = mfrevlog.rev(mfnode) |
|
299 | mfrev = mfrevlog.rev(mfnode) | |
296 |
|
300 | |||
297 | # Decompressing manifests is expensive. |
|
301 | # Decompressing manifests is expensive. | |
298 | # When possible, only read the deltas. |
|
302 | # When possible, only read the deltas. | |
299 | p1, p2 = mfrevlog.parentrevs(mfrev) |
|
303 | p1, p2 = mfrevlog.parentrevs(mfrev) | |
300 | if p1 in visited and p2 in visited: |
|
304 | if p1 in visited and p2 in visited: | |
301 | mfdict = mfl[mfnode].readfast() |
|
305 | mfdict = mfl[mfnode].readfast() | |
302 | else: |
|
306 | else: | |
303 | mfdict = mfl[mfnode].read() |
|
307 | mfdict = mfl[mfnode].read() | |
304 |
|
308 | |||
305 | diff = pycompat.iteritems(mfdict) |
|
309 | diff = pycompat.iteritems(mfdict) | |
306 | if pats: |
|
310 | if pats: | |
307 | diff = (pf for pf in diff if m(pf[0])) |
|
311 | diff = (pf for pf in diff if m(pf[0])) | |
308 | if sparsematch: |
|
312 | if sparsematch: | |
309 | diff = (pf for pf in diff if sparsematch(pf[0])) |
|
313 | diff = (pf for pf in diff if sparsematch(pf[0])) | |
310 | if rev not in localrevs: |
|
314 | if rev not in localrevs: | |
311 | serverfiles.update(diff) |
|
315 | serverfiles.update(diff) | |
312 | else: |
|
316 | else: | |
313 | files.update(diff) |
|
317 | files.update(diff) | |
314 |
|
318 | |||
315 | visited.add(mfrev) |
|
319 | visited.add(mfrev) | |
316 | progress.increment() |
|
320 | progress.increment() | |
317 |
|
321 | |||
318 | files.difference_update(skip) |
|
322 | files.difference_update(skip) | |
319 | serverfiles.difference_update(skip) |
|
323 | serverfiles.difference_update(skip) | |
320 | progress.complete() |
|
324 | progress.complete() | |
321 |
|
325 | |||
322 | # Fetch files known to be on the server |
|
326 | # Fetch files known to be on the server | |
323 | if serverfiles: |
|
327 | if serverfiles: | |
324 | results = [(path, hex(fnode)) for (path, fnode) in serverfiles] |
|
328 | results = [(path, hex(fnode)) for (path, fnode) in serverfiles] | |
325 | repo.fileservice.prefetch(results, force=True) |
|
329 | repo.fileservice.prefetch(results, force=True) | |
326 |
|
330 | |||
327 | # Fetch files that may or may not be on the server |
|
331 | # Fetch files that may or may not be on the server | |
328 | if files: |
|
332 | if files: | |
329 | results = [(path, hex(fnode)) for (path, fnode) in files] |
|
333 | results = [(path, hex(fnode)) for (path, fnode) in files] | |
330 | repo.fileservice.prefetch(results) |
|
334 | repo.fileservice.prefetch(results) | |
331 |
|
335 | |||
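_prefetch above accumulates (path, filenode) pairs from the manifests of the requested revisions and subtracts whatever the base revision's manifest already names, on the assumption that those versions are already available; it also prefers readfast() (delta-only reads) when both manifest parents have been visited, because decompressing full manifests is expensive. The toy below reproduces the set arithmetic with plain dicts standing in for manifests; the data is made up for illustration.

# Plain-dict stand-ins for manifests; illustrates the skip/files set logic only.
base_manifest = {b'a.py': b'n1', b'b.py': b'n2'}
rev_manifests = [
    {b'a.py': b'n1', b'b.py': b'n3'},                    # b.py changed
    {b'a.py': b'n4', b'b.py': b'n3', b'c.py': b'n5'},    # a.py changed, c.py added
]

skip = set(base_manifest.items())    # file versions the base already has
files = set(skip)                    # start from a copy, as the code above does
for mf in rev_manifests:
    files.update(mf.items())
files.difference_update(skip)

assert files == {(b'a.py', b'n4'), (b'b.py', b'n3'), (b'c.py', b'n5')}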
332 | def close(self): |
|
336 | def close(self): | |
333 | super(shallowrepository, self).close() |
|
337 | super(shallowrepository, self).close() | |
334 | self.connectionpool.close() |
|
338 | self.connectionpool.close() | |
335 |
|
339 | |||
336 | repo.__class__ = shallowrepository |
|
340 | repo.__class__ = shallowrepository | |
337 |
|
341 | |||
338 | repo.shallowmatch = match.always() |
|
342 | repo.shallowmatch = match.always() | |
339 |
|
343 | |||
340 | makeunionstores(repo) |
|
344 | makeunionstores(repo) | |
341 |
|
345 | |||
342 | repo.includepattern = repo.ui.configlist( |
|
346 | repo.includepattern = repo.ui.configlist( | |
343 | b"remotefilelog", b"includepattern", None |
|
347 | b"remotefilelog", b"includepattern", None | |
344 | ) |
|
348 | ) | |
345 | repo.excludepattern = repo.ui.configlist( |
|
349 | repo.excludepattern = repo.ui.configlist( | |
346 | b"remotefilelog", b"excludepattern", None |
|
350 | b"remotefilelog", b"excludepattern", None | |
347 | ) |
|
351 | ) | |
348 | if not util.safehasattr(repo, 'connectionpool'): |
|
352 | if not util.safehasattr(repo, 'connectionpool'): | |
349 | repo.connectionpool = connectionpool.connectionpool(repo) |
|
353 | repo.connectionpool = connectionpool.connectionpool(repo) | |
350 |
|
354 | |||
351 | if repo.includepattern or repo.excludepattern: |
|
355 | if repo.includepattern or repo.excludepattern: | |
352 | repo.shallowmatch = match.match( |
|
356 | repo.shallowmatch = match.match( | |
353 | repo.root, b'', None, repo.includepattern, repo.excludepattern |
|
357 | repo.root, b'', None, repo.includepattern, repo.excludepattern | |
354 | ) |
|
358 | ) |