@@ -1,1139 +1,1142 @@
# __init__.py - remotefilelog extension
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""remotefilelog causes Mercurial to lazily fetch file contents (EXPERIMENTAL)

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

remotefilelog works by fetching file contents lazily and storing them
in a cache on the client rather than in revlogs. This allows enormous
histories to be transferred only partially, making them easier to
operate on.

Configs:

``packs.maxchainlen`` specifies the maximum delta chain length in pack files

``packs.maxpacksize`` specifies the maximum pack file size

``packs.maxpackfilecount`` specifies the maximum number of packs in the
shared cache (trees only for now)

``remotefilelog.backgroundprefetch`` runs prefetch in background when True

``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
update, and on other commands that use them. Different from pullprefetch.

``remotefilelog.gcrepack`` does garbage collection during repack when True

``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
it is garbage collected

``remotefilelog.repackonhggc`` runs repack on hg gc when True

``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
days after which it is no longer prefetched.

``remotefilelog.prefetchdelay`` specifies delay between background
prefetches in seconds after operations that change the working copy parent

``remotefilelog.data.gencountlimit`` constrains the minimum number of data
pack files required to be considered part of a generation. In particular,
minimum number of pack files > gencountlimit.

``remotefilelog.data.generations`` list for specifying the lower bound of
each generation of the data pack files. For example, list ['100MB', '1MB']
or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
[1MB, 100MB) and [100MB, infinity).

``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
include in an incremental data repack.

``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
it to be considered for an incremental data repack.

``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
to include in an incremental data repack.

``remotefilelog.history.gencountlimit`` constrains the minimum number of
history pack files required to be considered part of a generation. In
particular, minimum number of pack files > gencountlimit.

``remotefilelog.history.generations`` list for specifying the lower bound of
each generation of the history pack files. For example, list
['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
[0, 1MB), [1MB, 100MB) and [100MB, infinity).

``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
include in an incremental history repack.

``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
for it to be considered for an incremental history repack.

``remotefilelog.history.repacksizelimit`` the maximum total size of pack
files to include in an incremental history repack.

``remotefilelog.backgroundrepack`` automatically consolidate packs in the
background

``remotefilelog.cachepath`` path to cache

``remotefilelog.cachegroup`` if set, make cache directory sgid to this
group

``remotefilelog.cacheprocess`` binary to invoke for fetching file data

``remotefilelog.debug`` turn on remotefilelog-specific debug output

``remotefilelog.excludepattern`` pattern of files to exclude from pulls

``remotefilelog.includepattern`` pattern of files to include in pulls

``remotefilelog.fetchwarning`` message to print when too many
single-file fetches occur

``remotefilelog.getfilesstep`` number of files to request in a single RPC

``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
files, otherwise use optimistic fetching

``remotefilelog.pullprefetch`` revset for selecting files that should be
eagerly downloaded rather than lazily

``remotefilelog.reponame`` name of the repo. If set, used to partition
data from other repos in a shared store.

``remotefilelog.server`` if true, enable server-side functionality

``remotefilelog.servercachepath`` path for caching blobs on the server

``remotefilelog.serverexpiration`` number of days to keep cached server
blobs

``remotefilelog.validatecache`` if set, check cache entries for corruption
before returning blobs

``remotefilelog.validatecachelog`` if set, check cache entries for
corruption before returning metadata

"""
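# Editorial sketch (not part of the diff): a minimal client configuration
# exercising the options documented above. The cache path, repo name, and
# revset are invented placeholders, not recommendations.
#
#   [extensions]
#   remotefilelog =
#
#   [remotefilelog]
#   cachepath = /var/cache/hgcache
#   reponame = myrepo
#   pullprefetch = parents(bookmark())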
from __future__ import absolute_import

import os
import time
import traceback

from mercurial.node import hex
from mercurial.i18n import _
from mercurial import (
    changegroup,
    changelog,
    cmdutil,
    commands,
    configitems,
    context,
    copies,
    debugcommands as hgdebugcommands,
    dispatch,
    error,
    exchange,
    extensions,
    hg,
    localrepo,
    match,
    merge,
    node as nodemod,
    patch,
+    pycompat,
    registrar,
    repair,
    repoview,
    revset,
    scmutil,
    smartset,
    streamclone,
    templatekw,
    util,
)
from . import (
    constants,
    debugcommands,
    fileserverclient,
    remotefilectx,
    remotefilelog,
    remotefilelogserver,
    repack as repackmod,
    shallowbundle,
    shallowrepo,
    shallowstore,
    shallowutil,
    shallowverifier,
)

# ensures debug commands are registered
hgdebugcommands.command

cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem('remotefilelog', 'debug', default=False)

configitem('remotefilelog', 'reponame', default='')
configitem('remotefilelog', 'cachepath', default=None)
configitem('remotefilelog', 'cachegroup', default=None)
configitem('remotefilelog', 'cacheprocess', default=None)
configitem('remotefilelog', 'cacheprocess.includepath', default=None)
configitem("remotefilelog", "cachelimit", default="1000 GB")

configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
           alias=[('remotefilelog', 'fallbackrepo')])

configitem('remotefilelog', 'validatecachelog', default=None)
configitem('remotefilelog', 'validatecache', default='on')
configitem('remotefilelog', 'server', default=None)
configitem('remotefilelog', 'servercachepath', default=None)
configitem("remotefilelog", "serverexpiration", default=30)
configitem('remotefilelog', 'backgroundrepack', default=False)
configitem('remotefilelog', 'bgprefetchrevs', default=None)
configitem('remotefilelog', 'pullprefetch', default=None)
configitem('remotefilelog', 'backgroundprefetch', default=False)
configitem('remotefilelog', 'prefetchdelay', default=120)
configitem('remotefilelog', 'prefetchdays', default=14)

configitem('remotefilelog', 'getfilesstep', default=10000)
configitem('remotefilelog', 'getfilestype', default='optimistic')
configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
configitem('remotefilelog', 'fetchwarning', default='')

configitem('remotefilelog', 'includepattern', default=None)
configitem('remotefilelog', 'excludepattern', default=None)

configitem('remotefilelog', 'gcrepack', default=False)
configitem('remotefilelog', 'repackonhggc', default=False)
configitem('repack', 'chainorphansbysize', default=True)

configitem('packs', 'maxpacksize', default=0)
configitem('packs', 'maxchainlen', default=1000)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem('remotefilelog', 'nodettl', default=_defaultlimit)

configitem('remotefilelog', 'data.gencountlimit', default=2)
configitem('remotefilelog', 'data.generations',
           default=['1GB', '100MB', '1MB'])
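# Editorial sketch (illustration only, not the extension's repack code): how
# a generations list such as ['100MB', '1MB'] maps onto size intervals.
# util.sizetoint is the real Mercurial helper for parsing human-readable
# sizes; _examplegenerationranges is an invented name.
def _examplegenerationranges(bounds):
    sizes = sorted(util.sizetoint(s) for s in bounds)
    edges = [0] + sizes + [None]  # None stands for infinity
    # pair each lower bound with the next one; the last interval is open
    return list(zip(edges, edges[1:]))
# _examplegenerationranges(['100MB', '1MB']) yields three generations:
# [(0, 1MB), (1MB, 100MB), (100MB, None)]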
configitem('remotefilelog', 'data.maxrepackpacks', default=50)
configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
configitem('remotefilelog', 'data.repacksizelimit', default='100MB')

configitem('remotefilelog', 'history.gencountlimit', default=2)
configitem('remotefilelog', 'history.generations', default=['100MB'])
configitem('remotefilelog', 'history.maxrepackpacks', default=50)
configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
configitem('remotefilelog', 'history.repacksizelimit', default='100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

isenabled = shallowutil.isenabled

def uisetup(ui):
    """Wraps user-facing Mercurial commands to swap them out with shallow
    versions.
    """
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
    entry[1].append(('', 'shallow', None,
                     _("create a shallow clone which uses remote file "
                       "history")))

    extensions.wrapcommand(commands.table, 'debugindex',
                           debugcommands.debugindex)
    extensions.wrapcommand(commands.table, 'debugindexdot',
                           debugcommands.debugindexdot)
    extensions.wrapcommand(commands.table, 'log', log)
    extensions.wrapcommand(commands.table, 'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
-        if (isenabled(repo) and opts.get('all')):
+        if (isenabled(repo) and opts.get(r'all')):
            raise error.Abort(_("--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)
    extensions.wrapcommand(commands.table, "manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find('lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod
    extensions.afterloaded('lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)

def cloneshallow(orig, ui, repo, *args, **opts):
-    if opts.get('shallow'):
+    if opts.get(r'shallow'):
        repos = []
        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (self.__class__.__bases__[0],
                                                self.unfiltered().__class__)
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                self._writerequirements()

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)
        extensions.wrapfunction(exchange, 'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
-                        opts['includepattern'] = '\0'.join(repo.includepattern)
+                        opts[r'includepattern'] = '\0'.join(repo.includepattern)
                    if repo.excludepattern:
-                        opts['excludepattern'] = '\0'.join(repo.excludepattern)
+                        opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
                    return remote._callstream('stream_out_shallow', **opts)
                else:
                    return orig()
            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
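            # Editorial note (an assumption about the wire format implied
            # above): the include/exclude patterns travel as one
            # NUL-separated string, e.g. ['path:foo', 'path:bar'] becomes
            # 'path:foo\x00path:bar', and the server side is presumed to
            # split on '\0' to recover the list.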
        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)
        extensions.wrapfunction(
            streamclone, 'maybeperformlegacystreamclone', stream_wrap)

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if 'v2' in pullop.remotebundle2caps.get('stream', []):
                pullop.remotebundle2caps['stream'] = [
                    c for c in pullop.remotebundle2caps['stream']
                    if c != 'v2']
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements
        extensions.wrapfunction(
            streamclone, 'canperformstreamclone', canperformstreamclone)

    try:
        orig(ui, repo, *args, **opts)
    finally:
-        if opts.get('shallow'):
+        if opts.get(r'shallow'):
            for r in repos:
                if util.safehasattr(r, 'fileservice'):
                    r.fileservice.close()

def debugdatashallow(orig, *args, **kwds):
    oldlen = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda x: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = oldlen

def reposetup(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # put here intentionally because it doesn't work in uisetup
    ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
    ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)

    isserverenabled = ui.configbool('remotefilelog', 'server')
    isshallowclient = isenabled(repo)

    if isserverenabled and isshallowclient:
        raise RuntimeError("Cannot be both a server and shallow client.")

    if isshallowclient:
        setupclient(ui, repo)

    if isserverenabled:
        remotefilelogserver.setupserver(ui, repo)

def setupclient(ui, repo):
    if not isinstance(repo, localrepo.localrepository):
        return

    # Even clients get the server setup since they need to have the
    # wireprotocol endpoints registered.
    remotefilelogserver.onetimesetup(ui)
    onetimeclientsetup(ui)

    shallowrepo.wraprepo(repo)
    repo.store = shallowstore.wrapstore(repo.store)

clientonetime = False
def onetimeclientsetup(ui):
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(changegroup, '_addchangegroupfiles',
                            shallowbundle.addchangegroupfiles)
    extensions.wrapfunction(
        changegroup, 'makechangegroup', shallowbundle.makechangegroup)

    def storewrapper(orig, requirements, path, vfstype):
        s = orig(requirements, path, vfstype)
        if constants.SHALLOWREPO_REQUIREMENT in requirements:
            s = shallowstore.wrapstore(s)

        return s
    extensions.wrapfunction(localrepo, 'makestore', storewrapper)

    extensions.wrapfunction(exchange, 'pull', exchangepull)

    # prefetch files before update
    def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
        if isenabled(repo):
            manifest = mctx.manifest()
            files = []
            for f, args, msg in actions['g']:
                files.append((f, hex(manifest[f])))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
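    # Editorial note: each prefetch call above and below passes a list of
    # (path, hex filenode) pairs, so the fileservice can batch one request
    # for all missing blobs instead of paying a round trip per file.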

    # Prefetch merge checkunknownfiles
    def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
                          *args, **kwargs):
        if isenabled(repo):
            files = []
            sparsematch = repo.maybesparsematch(mctx.rev())
            for f, (m, actionargs, msg) in actions.iteritems():
                if sparsematch and not sparsematch(f):
                    continue
                if m in ('c', 'dc', 'cm'):
                    files.append((f, hex(mctx.filenode(f))))
                elif m == 'dg':
                    f2 = actionargs[0]
                    files.append((f2, hex(mctx.filenode(f2))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)

    # Prefetch files before status attempts to look at their size and contents
    def checklookup(orig, self, files):
        repo = self._repo
        if isenabled(repo):
            prefetchfiles = []
            for parent in self._parents:
                for f in files:
                    if f in parent:
                        prefetchfiles.append((f, hex(parent.filenode(f))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(prefetchfiles)
        return orig(self, files)
    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)

    # Prefetch the logic that compares added and removed files for renames
    def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
        if isenabled(repo):
            files = []
            parentctx = repo['.']
            for f in removed:
                files.append((f, hex(parentctx.filenode(f))))
            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return orig(repo, matcher, added, removed, *args, **kwargs)
    extensions.wrapfunction(scmutil, '_findrenames', findrenames)

    # prefetch files before mergecopies check
    def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
        u1, u2 = orig(repo, c1, c2, *args, **kwargs)
        if isenabled(repo):
            m1 = c1.manifest()
            m2 = c2.manifest()
            files = []

            sparsematch1 = repo.maybesparsematch(c1.rev())
            if sparsematch1:
                sparseu1 = []
                for f in u1:
                    if sparsematch1(f):
                        files.append((f, hex(m1[f])))
                        sparseu1.append(f)
                u1 = sparseu1

            sparsematch2 = repo.maybesparsematch(c2.rev())
            if sparsematch2:
                sparseu2 = []
                for f in u2:
                    if sparsematch2(f):
                        files.append((f, hex(m2[f])))
                        sparseu2.append(f)
                u2 = sparseu2

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return u1, u2
    extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap)

    # prefetch files before pathcopies check
    def computeforwardmissing(orig, a, b, match=None):
        missing = list(orig(a, b, match=match))
        repo = a._repo
        if isenabled(repo):
            mb = b.manifest()

            files = []
            sparsematch = repo.maybesparsematch(b.rev())
            if sparsematch:
                sparsemissing = []
                for f in missing:
                    if sparsematch(f):
                        files.append((f, hex(mb[f])))
                        sparsemissing.append(f)
                missing = sparsemissing

            # batch fetch the needed files from the server
            repo.fileservice.prefetch(files)
        return missing
    extensions.wrapfunction(copies, '_computeforwardmissing',
                            computeforwardmissing)

    # close cache miss server connection after the command has finished
    def runcommand(orig, lui, repo, *args, **kwargs):
        fileservice = None
        # repo can be None when running in chg:
        # - at startup, reposetup was called because serve is not norepo
        # - a norepo command like "help" is called
        if repo and isenabled(repo):
            fileservice = repo.fileservice
        try:
            return orig(lui, repo, *args, **kwargs)
        finally:
            if fileservice:
                fileservice.close()
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)

    # disappointing hacks below
    templatekw.getrenamedfn = getrenamedfn
    extensions.wrapfunction(revset, 'filelog', filelogrevset)
    revset.symbols['filelog'] = revset.filelog
    extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)

    # prevent strip from stripping remotefilelogs
    def _collectbrokencsets(orig, repo, files, striprev):
        if isenabled(repo):
            files = list([f for f in files if not repo.shallowmatch(f)])
        return orig(repo, files, striprev)
    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []
    def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
                       flags, cachedelta=None, _metatuple=None):
        if isinstance(link, int):
            pendingfilecommits.append(
                (self, rawtext, transaction, link, p1, p2, node, flags,
                 cachedelta, _metatuple))
            return node
        else:
            return orig(self, rawtext, transaction, link, p1, p2, node, flags,
                        cachedelta, _metatuple=_metatuple)
    extensions.wrapfunction(
        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
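    # Editorial flow sketch: during commit, filelogs are written first with
    # "link" set to the prospective changelog revision (an int placeholder).
    # The wrapper above queues those writes; changelogadd below replays them
    # once the real changelog node exists, using it as the linknode.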

    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        'pending multiple integer revisions are not supported')
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    'pending multiple integer revisions are not supported')
        del pendingfilecommits[:]
        return node
    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)

    # changectx wrappers
    def filectx(orig, self, path, fileid=None, filelog=None):
        if fileid is None:
            fileid = self.filenode(path)
        if (isenabled(self._repo) and self._repo.shallowmatch(path)):
            return remotefilectx.remotefilectx(self._repo, path,
                fileid=fileid, changectx=self, filelog=filelog)
        return orig(self, path, fileid=fileid, filelog=filelog)
    extensions.wrapfunction(context.changectx, 'filectx', filectx)

    def workingfilectx(orig, self, path, filelog=None):
        if (isenabled(self._repo) and self._repo.shallowmatch(path)):
            return remotefilectx.remoteworkingfilectx(self._repo,
                path, workingctx=self, filelog=filelog)
        return orig(self, path, filelog=filelog)
    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)

    # prefetch required revisions before a diff
    def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
                copy, getfilectx, *args, **kwargs):
        if isenabled(repo):
            prefetch = []
            mf1 = ctx1.manifest()
            for fname in modified + added + removed:
                if fname in mf1:
                    fnode = getfilectx(fname, ctx1).filenode()
                    # fnode can be None if it's an edited working ctx file
                    if fnode:
                        prefetch.append((fname, hex(fnode)))
                if fname not in removed:
                    fnode = getfilectx(fname, ctx2).filenode()
                    if fnode:
                        prefetch.append((fname, hex(fnode)))

            repo.fileservice.prefetch(prefetch)

        return orig(repo, revs, ctx1, ctx2, modified, added, removed,
                    copy, getfilectx, *args, **kwargs)
    extensions.wrapfunction(patch, 'trydiff', trydiff)

    # Prevent verify from processing files
    # a stub for mercurial.hg.verify()
    def _verify(orig, repo):
        lock = repo.lock()
        try:
            return shallowverifier.shallowverifier(repo).verify()
        finally:
            lock.release()

    extensions.wrapfunction(hg, 'verify', _verify)

    scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)

def getrenamedfn(repo, endrev=None):
    rcache = {}

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if rev in rcache.setdefault(fn, {}):
            return rcache[fn][rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    rcache[fn][ancestor.rev()] = renamed

            return fctx.renamed()
        except error.LookupError:
            return None

    return getrenamed
689 |
|
690 | |||
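# Illustrative use of the closure returned above (a sketch; the file name is
# an example): results are memoized per (filename, rev), so repeated
# template-keyword lookups avoid re-walking the file's ancestry.
#
#   getrenamed = getrenamedfn(repo)
#   rename = getrenamed('foo.py', rev)  # fctx.renamed() result, or None when
#                                       # the lookup fails
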
def walkfilerevs(orig, repo, match, follow, revs, fncache):
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelogs can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError("Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    pctx = repo['.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(_('cannot follow file not in parent '
                                'revision: "%s"') % filename)
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted

def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _("filelog requires a pattern"))
    m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
                    ctx=repo[None])
    s = set()

    if not match.patkind(pat):
        # slow
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            for f in m.files():
                if f in cfiles:
                    s.add(ctx.rev())
                    break
    else:
        # partial
        files = (f for f in repo[None] if m(f))
        for f in files:
            fctx = repo[None].filectx(f)
            s.add(fctx.linkrev())
            for actx in fctx.ancestors():
                s.add(actx.linkrev())

    return smartset.baseset([r for r in subset if r in s])

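# Example of the revset this implementation backs (standard Mercurial
# syntax; the path is illustrative):
#
#   hg log -r 'filelog("path:mercurial/commands.py")'
#
# With remotefilelog enabled there is no local filelog to walk, so the
# branches above fall back to scanning ctx.files() or ancestor linkrevs.
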
@command('gc', [], _('hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    cachepaths = set()

    # get the system client cache
    systemcache = shallowutil.getcachepath(ui, allowempty=True)
    if systemcache:
        cachepaths.add(systemcache)

    # get repo client and server cache
    repopaths = []
    pwd = ui.environ.get('PWD')
    if pwd:
        repopaths.append(pwd)

    repopaths.extend(args)
    repos = []
    for repopath in repopaths:
        try:
            repo = hg.peer(ui, {}, repopath)
            repos.append(repo)

            repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc client cache
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # gc server cache
    for repo in repos:
        remotefilelogserver.gcserver(ui, repo._repo)

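# Illustrative invocations (the path is an example):
#
#   hg gc                   # collect the caches of the repo found via $PWD
#   hg gc /path/to/repo     # additionally collect caches for a named repo
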
def gcclient(ui, cachepath):
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, 'repos')
    if not os.path.exists(repospath):
        ui.warn(_("no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, 'r')
    repos = set([r[:-1] for r in reposfile.readlines()])
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    _analyzing = _("analyzing repositories")

    sharedcache = None
    filesrepacked = False

    count = 0
    for path in repos:
        ui.progress(_analyzing, count, unit="repos", total=len(repos))
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, 'name'):
            ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
        gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))
        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    ui.progress(_analyzing, None)

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, 'w')
        reposfile.writelines([("%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_("warning: no valid repos in repofile\n"))

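# The 'repos' file read above is appended to by basestore.markrepo(): one
# repository path per line, recorded whenever a repo uses this cache. A
# hedged example of its contents (the paths are made up):
#
#   /home/alice/mercurial
#   /home/alice/mozilla-central
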
def log(orig, ui, repo, *pats, **opts):
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    follow = opts.get(r'follow')
    revs = opts.get(r'rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if not follow or revs:
            # This forces the slowpath
            opts[r'removed'] = True

        # If this is a non-follow log without any revs specified, recommend that
        # the user add -f to speed it up.
        if not follow and not revs:
            match, pats = scmutil.matchandpats(repo['.'], pats,
                                               pycompat.byteskwargs(opts))
            isfile = not match.anypats()
            if isfile:
                for file in match.files():
                    if not os.path.isfile(repo.wjoin(file)):
                        isfile = False
                        break

            if isfile:
                ui.warn(_("warning: file log can be slow on large repos - " +
                          "use -f to speed it up\n"))

    return orig(ui, repo, *pats, **opts)

def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is set to 14 days. If 'prefetchdays' is set
    to zero or a negative value, the date restriction is not applied.
    """
    days = ui.configint('remotefilelog', 'prefetchdays')
    if days > 0:
        revset = '(%s) & date(-%s)' % (revset, days)
    return revset

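# For example, with the default prefetchdays value of 14, revdatelimit
# rewrites a revset like so:
#
#   revdatelimit(ui, 'draft()')  ->  '(draft()) & date(-14)'
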
def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. The default delay between background prefetches is 2 minutes.
    """
    timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
    fname = repo.vfs.join('lastprefetch')

    ready = False
    with open(fname, 'a'):
        # the with construct above is used to avoid race conditions
        modtime = os.path.getmtime(fname)
        if (time.time() - modtime) > timeout:
            os.utime(fname, None)
            ready = True

    return ready

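# The delay is configurable; an illustrative hgrc snippet (the value is in
# seconds, matching the 2-minute default mentioned above):
#
#   [remotefilelog]
#   prefetchdelay = 120
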
def wcpprefetch(ui, repo, **kwargs):
    """Prefetches, in the background, the revisions specified by the
    bgprefetchrevs revset. Does a background repack if the backgroundrepack
    flag is set in the config.
    """
    shallow = isenabled(repo)
    bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
    isready = readytofetch(repo)

    if not (shallow and bgprefetchrevs and isready):
        return

    bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
    # update a revset with a date limit
    bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)

    def anon():
        if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)

    repo._afterlock(anon)

def pull(orig, ui, repo, *pats, **opts):
    result = orig(ui, repo, *pats, **opts)

    if isenabled(repo):
        # prefetch if it's configured
        prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
        bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
        bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')

        if prefetchrevset:
            ui.status(_("prefetching file contents\n"))
            revs = scmutil.revrange(repo, [prefetchrevset])
            base = repo['.'].rev()
            if bgprefetch:
                repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
            else:
                repo.prefetch(revs, base=base)
                if bgrepack:
                    repackmod.backgroundrepack(repo, incremental=True)
        elif bgrepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result

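# A sketch of wiring up pull-time prefetching (hgrc snippet; the revset
# value 'master' is illustrative):
#
#   [remotefilelog]
#   pullprefetch = master
#   backgroundprefetch = True
#   backgroundrepack = True
#
# With all three set, the wrapped pull above pushes both the prefetch and
# the repack into background processes instead of blocking the pull.
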
def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
                       **kwargs):
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
                    **kwargs)

    if util.safehasattr(remote, '_callstream'):
        remote._localrepo = repo
    elif util.safehasattr(remote, 'getbundle'):
        extensions.wrapfunction(remote, 'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)

def _fileprefetchhook(repo, revs, match):
    if isenabled(repo):
        allfiles = []
        for rev in revs:
            if rev == nodemod.wdirrev or rev is None:
                continue
            ctx = repo[rev]
            mf = ctx.manifest()
            sparsematch = repo.maybesparsematch(ctx.rev())
            for path in ctx.walk(match):
                if path.endswith('/'):
                    # Tree manifest that's being excluded as part of narrow
                    continue
                if (not sparsematch or sparsematch(path)) and path in mf:
                    allfiles.append((path, hex(mf[path])))
        repo.fileservice.prefetch(allfiles)

@command('debugremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelog first')),
    ], _('hg debugremotefilelog <path>'), norepo=True)
def debugremotefilelog(ui, path, **opts):
    return debugcommands.debugremotefilelog(ui, path, **opts)

@command('verifyremotefilelog', [
    ('d', 'decompress', None, _('decompress the filelogs first')),
    ], _('hg verifyremotefilelogs <directory>'), norepo=True)
def verifyremotefilelog(ui, path, **opts):
    return debugcommands.verifyremotefilelog(ui, path, **opts)

@command('debugdatapack', [
    ('', 'long', None, _('print the long hashes')),
    ('', 'node', '', _('dump the contents of node'), 'NODE'),
    ], _('hg debugdatapack <paths>'), norepo=True)
def debugdatapack(ui, *paths, **opts):
    return debugcommands.debugdatapack(ui, *paths, **opts)

@command('debughistorypack', [
    ], _('hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    return debugcommands.debughistorypack(ui, path)

@command('debugkeepset', [
    ], _('hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
    repackmod.keepset(repo, keyfn)
    return

@command('debugwaitonrepack', [
    ], _('hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    return debugcommands.debugwaitonrepack(repo)

@command('debugwaitonprefetch', [
    ], _('hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    return debugcommands.debugwaitonprefetch(repo)

def resolveprefetchopts(ui, opts):
    if not opts.get('rev'):
        revset = ['.', 'draft()']

        prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
        if prefetchrevset:
            revset.append('(%s)' % prefetchrevset)
        bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
        if bgprefetchrevs:
            revset.append('(%s)' % bgprefetchrevs)
        revset = '+'.join(revset)

        # update a revset with a date limit
        revset = revdatelimit(ui, revset)

        opts['rev'] = [revset]

    if not opts.get('base'):
        opts['base'] = None

    return opts

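# For example, assuming pullprefetch = master and no bgprefetchrevs, an
# argumentless `hg prefetch` ends up with (before the date limit from
# revdatelimit is applied):
#
#   opts['rev'] = ['.+draft()+(master)']
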
@command('prefetch', [
    ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
    ('', 'repack', False, _('run repack after prefetch')),
    ('b', 'base', '', _("rev that is assumed to already be local")),
    ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetches file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_("repo is not shallow"))

    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get('rev'))
    repo.prefetch(revs, opts.get('base'), pats, opts)

    # Run repack in background
    if opts.get('repack'):
        repackmod.backgroundrepack(repo, incremental=True)

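# Illustrative invocations:
#
#   hg prefetch -r master              # warm the cache for one revision
#   hg prefetch -r 'draft()' --repack  # prefetch, then repack in background
#   hg prefetch glob:**.c              # limit the fetch to matching files
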
@command('repack', [
     ('', 'background', None, _('run in a background process'), None),
     ('', 'incremental', None, _('do an incremental repack'), None),
     ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
    ], _('hg repack [OPTIONS]'))
def repack_(ui, repo, *pats, **opts):
    if opts.get(r'background'):
        repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
                                   packsonly=opts.get(r'packsonly', False))
        return

    options = {'packsonly': opts.get(r'packsonly')}

    try:
        if opts.get(r'incremental'):
            repackmod.incrementalrepack(repo, options=options)
        else:
            repackmod.fullrepack(repo, options=options)
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn('%s\n' % ex)

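# Illustrative invocations:
#
#   hg repack                             # full repack in the foreground
#   hg repack --incremental --background  # cheaper repack, detached process
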
@@ -1,423 +1,423 @@
from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat
import time

from mercurial.i18n import _
from mercurial.node import bin, hex
from mercurial import (
    error,
    pycompat,
    util,
)
from . import (
    constants,
    shallowutil,
)

class basestore(object):
    def __init__(self, repo, path, reponame, shared=False):
        """Creates a remotefilelog store object for the given repo name.

        `path` - The file path where this store keeps its data
        `reponame` - The name of the repo. This is used to partition data from
        many repos.
        `shared` - True if this store is a shared cache of data from the central
        server, for many repos on this machine. False means this store is for
        the local data for one repo.
        """
        self.repo = repo
        self.ui = repo.ui
        self._path = path
        self._reponame = reponame
        self._shared = shared
        self._uid = os.getuid() if not pycompat.iswindows else None

        self._validatecachelog = self.ui.config("remotefilelog",
                                                "validatecachelog")
        self._validatecache = self.ui.config("remotefilelog", "validatecache",
                                             'on')
        if self._validatecache not in ('on', 'strict', 'off'):
            self._validatecache = 'on'
        if self._validatecache == 'off':
            self._validatecache = False

        if shared:
            shallowutil.mkstickygroupdir(self.ui, path)

    def getmissing(self, keys):
        missing = []
        for name, node in keys:
            filepath = self._getfilepath(name, node)
            exists = os.path.exists(filepath)
            if (exists and self._validatecache == 'strict' and
                not self._validatekey(filepath, 'contains')):
                exists = False
            if not exists:
                missing.append((name, node))

        return missing

    # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE

    def markledger(self, ledger, options=None):
        if options and options.get(constants.OPTION_PACKSONLY):
            return
        if self._shared:
            for filename, nodes in self._getfiles():
                for node in nodes:
                    ledger.markdataentry(self, filename, node)
                    ledger.markhistoryentry(self, filename, node)

    def cleanup(self, ledger):
        ui = self.ui
        entries = ledger.sources.get(self, [])
        count = 0
        for entry in entries:
            if entry.gced or (entry.datarepacked and entry.historyrepacked):
                ui.progress(_("cleaning up"), count, unit="files",
                            total=len(entries))
                path = self._getfilepath(entry.filename, entry.node)
                util.tryunlink(path)
            count += 1
        ui.progress(_("cleaning up"), None)

        # Clean up the repo cache directory.
        self._cleanupdirectory(self._getrepocachepath())

    # BELOW THIS ARE NON-STANDARD APIS

    def _cleanupdirectory(self, rootdir):
        """Removes the empty directories and unnecessary files within the root
        directory recursively. Note that this method does not remove the root
        directory itself. """

        oldfiles = set()
        otherfiles = set()
        # osutil.listdir returns stat information which saves some rmdir/listdir
        # syscalls.
        for name, mode in util.osutil.listdir(rootdir):
            if stat.S_ISDIR(mode):
                dirpath = os.path.join(rootdir, name)
                self._cleanupdirectory(dirpath)

                # Now that the directory specified by dirpath is potentially
                # empty, try and remove it.
                try:
                    os.rmdir(dirpath)
                except OSError:
                    pass

            elif stat.S_ISREG(mode):
                if name.endswith('_old'):
                    oldfiles.add(name[:-4])
                else:
                    otherfiles.add(name)

        # Remove the files which end with suffix '_old' and have no
        # corresponding file without the suffix '_old'. See addremotefilelognode
        # method for the generation/purpose of files with '_old' suffix.
        for filename in oldfiles - otherfiles:
            filepath = os.path.join(rootdir, filename + '_old')
            util.tryunlink(filepath)

    def _getfiles(self):
        """Return a list of (filename, [node,...]) for all the revisions that
        exist in the store.

        This is useful for obtaining a list of all the contents of the store
        when performing a repack to another store, since the store API requires
        name+node keys and not namehash+node keys.
        """
        existing = {}
        for filenamehash, node in self._listkeys():
            existing.setdefault(filenamehash, []).append(node)

        filenamemap = self._resolvefilenames(existing.keys())

        for filename, sha in filenamemap.iteritems():
            yield (filename, existing[sha])

    def _resolvefilenames(self, hashes):
        """Given a list of filename hashes that are present in the
        remotefilelog store, return a mapping from filename->hash.

        This is useful when converting remotefilelog blobs into other storage
        formats.
        """
        if not hashes:
            return {}

        filenames = {}
        missingfilename = set(hashes)

        # Start with a full manifest, since it'll cover the majority of files
        for filename in self.repo['tip'].manifest():
            sha = hashlib.sha1(filename).digest()
            if sha in missingfilename:
                filenames[filename] = sha
                missingfilename.discard(sha)

        # Scan the changelog until we've found every file name
        cl = self.repo.unfiltered().changelog
        for rev in pycompat.xrange(len(cl) - 1, -1, -1):
            if not missingfilename:
                break
            files = cl.readfiles(cl.node(rev))
            for filename in files:
                sha = hashlib.sha1(filename).digest()
                if sha in missingfilename:
                    filenames[filename] = sha
                    missingfilename.discard(sha)

        return filenames

    def _getrepocachepath(self):
        return os.path.join(
            self._path, self._reponame) if self._shared else self._path

    def _listkeys(self):
        """List all the remotefilelog keys that exist in the store.

        Returns an iterator of (filename hash, filecontent hash) tuples.
        """

        for root, dirs, files in os.walk(self._getrepocachepath()):
            for filename in files:
                if len(filename) != 40:
                    continue
                node = filename
                if self._shared:
                    # .../1a/85ffda..be21
                    filenamehash = root[-41:-39] + root[-38:]
                else:
                    filenamehash = root[-40:]
                yield (bin(filenamehash), bin(node))

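    # Layout sketch that this walk relies on for the shared cache (the hash
    # fragments are illustrative): the two directory levels reassemble into
    # the 40-char sha1 of the file name, and the leaf file name is the node:
    #
    #   <cachepath>/<reponame>/1a/85ffda...be21/<40-hex filecontent node>
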
    def _getfilepath(self, name, node):
        node = hex(node)
        if self._shared:
            key = shallowutil.getcachekey(self._reponame, name, node)
        else:
            key = shallowutil.getlocalkey(name, node)

        return os.path.join(self._path, key)

    def _getdata(self, name, node):
        filepath = self._getfilepath(name, node)
        try:
            data = shallowutil.readfile(filepath)
            if self._validatecache and not self._validatedata(data, filepath):
                if self._validatecachelog:
                    with open(self._validatecachelog, 'a+') as f:
                        f.write("corrupt %s during read\n" % filepath)
                os.rename(filepath, filepath + ".corrupt")
                raise KeyError("corrupt local cache file %s" % filepath)
        except IOError:
            raise KeyError("no file found at %s for %s:%s" % (filepath, name,
                                                              hex(node)))

        return data

    def addremotefilelognode(self, name, node, data):
        filepath = self._getfilepath(name, node)

        oldumask = os.umask(0o002)
        try:
            # if this node already exists, save the old version for
            # recovery/debugging purposes.
            if os.path.exists(filepath):
                newfilename = filepath + '_old'
                # newfilename can be read-only and shutil.copy will fail.
                # Delete newfilename to avoid it
                if os.path.exists(newfilename):
                    shallowutil.unlinkfile(newfilename)
                shutil.copy(filepath, newfilename)

            shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
            shallowutil.writefile(filepath, data, readonly=True)

            if self._validatecache:
                if not self._validatekey(filepath, 'write'):
                    raise error.Abort(_("local cache write was corrupted %s") %
                                      filepath)
        finally:
            os.umask(oldumask)

    def markrepo(self, path):
        """Call this to add the given repo path to the store's list of
        repositories that are using it. This is useful later when doing garbage
        collection, since it allows us to inspect the repos to see what nodes
        they want to be kept alive in the store.
        """
        repospath = os.path.join(self._path, "repos")
        with open(repospath, 'a') as reposfile:
            reposfile.write(os.path.dirname(path) + "\n")

        repospathstat = os.stat(repospath)
        if repospathstat.st_uid == self._uid:
            os.chmod(repospath, 0o0664)

    def _validatekey(self, path, action):
        with open(path, 'rb') as f:
            data = f.read()

        if self._validatedata(data, path):
            return True

        if self._validatecachelog:
            with open(self._validatecachelog, 'a+') as f:
                f.write("corrupt %s during %s\n" % (path, action))

        os.rename(path, path + ".corrupt")
        return False

279 | def _validatedata(self, data, path): |
|
279 | def _validatedata(self, data, path): | |
280 | try: |
|
280 | try: | |
281 | if len(data) > 0: |
|
281 | if len(data) > 0: | |
282 | # see remotefilelogserver.createfileblob for the format |
|
282 | # see remotefilelogserver.createfileblob for the format | |
283 | offset, size, flags = shallowutil.parsesizeflags(data) |
|
283 | offset, size, flags = shallowutil.parsesizeflags(data) | |
284 | if len(data) <= size: |
|
284 | if len(data) <= size: | |
285 | # it is truncated |
|
285 | # it is truncated | |
286 | return False |
|
286 | return False | |
287 |
|
287 | |||
288 | # extract the node from the metadata |
|
288 | # extract the node from the metadata | |
289 | offset += size |
|
289 | offset += size | |
290 | datanode = data[offset:offset + 20] |
|
290 | datanode = data[offset:offset + 20] | |
291 |
|
291 | |||
292 | # and compare against the path |
|
292 | # and compare against the path | |
293 | if os.path.basename(path) == hex(datanode): |
|
293 | if os.path.basename(path) == hex(datanode): | |
294 | # Content matches the intended path |
|
294 | # Content matches the intended path | |
295 | return True |
|
295 | return True | |
296 | return False |
|
296 | return False | |
297 | except (ValueError, RuntimeError): |
|
297 | except (ValueError, RuntimeError): | |
298 | pass |
|
298 | pass | |
299 |
|
299 | |||
300 | return False |
|
300 | return False | |
301 |
|
301 | |||
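# The layout assumed by the check above, condensed: a cache blob is
# <size header><raw text of `size` bytes><20-byte node><ancestor data...>,
# and the cache filename must equal the hex of that trailing node. As a
# standalone predicate (reusing this module's os and hex imports; the
# helper name is hypothetical):
def _blobmatchespath(data, offset, size, path):
    if len(data) <= size:
        # blob is truncated; the node cannot follow the text
        return False
    datanode = data[offset + size:offset + size + 20]
    return os.path.basename(path) == hex(datanode)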
302 | def gc(self, keepkeys): |
|
302 | def gc(self, keepkeys): | |
303 | ui = self.ui |
|
303 | ui = self.ui | |
304 | cachepath = self._path |
|
304 | cachepath = self._path | |
305 | _removing = _("removing unnecessary files") |
|
305 | _removing = _("removing unnecessary files") | |
306 | _truncating = _("enforcing cache limit") |
|
306 | _truncating = _("enforcing cache limit") | |
307 |
|
307 | |||
308 | # prune cache |
|
308 | # prune cache | |
309 | import Queue |
|
309 | import Queue | |
310 | queue = Queue.PriorityQueue() |
|
310 | queue = Queue.PriorityQueue() | |
311 | originalsize = 0 |
|
311 | originalsize = 0 | |
312 | size = 0 |
|
312 | size = 0 | |
313 | count = 0 |
|
313 | count = 0 | |
314 | removed = 0 |
|
314 | removed = 0 | |
315 |
|
315 | |||
316 | # keep files newer than a day even if they aren't needed |
|
316 | # keep files newer than a day even if they aren't needed | |
317 | limit = time.time() - (60 * 60 * 24) |
|
317 | limit = time.time() - (60 * 60 * 24) | |
318 |
|
318 | |||
319 | ui.progress(_removing, count, unit="files") |
|
319 | ui.progress(_removing, count, unit="files") | |
320 | for root, dirs, files in os.walk(cachepath): |
|
320 | for root, dirs, files in os.walk(cachepath): | |
321 | for file in files: |
|
321 | for file in files: | |
322 | if file == 'repos': |
|
322 | if file == 'repos': | |
323 | continue |
|
323 | continue | |
324 |
|
324 | |||
325 | # Don't delete pack files |
|
325 | # Don't delete pack files | |
326 | if '/packs/' in root: |
|
326 | if '/packs/' in root: | |
327 | continue |
|
327 | continue | |
328 |
|
328 | |||
329 | ui.progress(_removing, count, unit="files") |
|
329 | ui.progress(_removing, count, unit="files") | |
330 | path = os.path.join(root, file) |
|
330 | path = os.path.join(root, file) | |
331 | key = os.path.relpath(path, cachepath) |
|
331 | key = os.path.relpath(path, cachepath) | |
332 | count += 1 |
|
332 | count += 1 | |
333 | try: |
|
333 | try: | |
334 | pathstat = os.stat(path) |
|
334 | pathstat = os.stat(path) | |
335 | except OSError as e: |
|
335 | except OSError as e: | |
336 | # errno.ENOENT = no such file or directory |
|
336 | # errno.ENOENT = no such file or directory | |
337 | if e.errno != errno.ENOENT: |
|
337 | if e.errno != errno.ENOENT: | |
338 | raise |
|
338 | raise | |
339 | msg = _("warning: file %s was removed by another process\n") |
|
339 | msg = _("warning: file %s was removed by another process\n") | |
340 | ui.warn(msg % path) |
|
340 | ui.warn(msg % path) | |
341 | continue |
|
341 | continue | |
342 |
|
342 | |||
343 | originalsize += pathstat.st_size |
|
343 | originalsize += pathstat.st_size | |
344 |
|
344 | |||
345 | if key in keepkeys or pathstat.st_atime > limit: |
|
345 | if key in keepkeys or pathstat.st_atime > limit: | |
346 | queue.put((pathstat.st_atime, path, pathstat)) |
|
346 | queue.put((pathstat.st_atime, path, pathstat)) | |
347 | size += pathstat.st_size |
|
347 | size += pathstat.st_size | |
348 | else: |
|
348 | else: | |
349 | try: |
|
349 | try: | |
350 | shallowutil.unlinkfile(path) |
|
350 | shallowutil.unlinkfile(path) | |
351 | except OSError as e: |
|
351 | except OSError as e: | |
352 | # errno.ENOENT = no such file or directory |
|
352 | # errno.ENOENT = no such file or directory | |
353 | if e.errno != errno.ENOENT: |
|
353 | if e.errno != errno.ENOENT: | |
354 | raise |
|
354 | raise | |
355 | msg = _("warning: file %s was removed by another " |
|
355 | msg = _("warning: file %s was removed by another " | |
356 | "process\n") |
|
356 | "process\n") | |
357 | ui.warn(msg % path) |
|
357 | ui.warn(msg % path) | |
358 | continue |
|
358 | continue | |
359 | removed += 1 |
|
359 | removed += 1 | |
360 | ui.progress(_removing, None) |
|
360 | ui.progress(_removing, None) | |
361 |
|
361 | |||
362 | # remove oldest files until under limit |
|
362 | # remove oldest files until under limit | |
363 | limit = ui.configbytes("remotefilelog", "cachelimit") |
|
363 | limit = ui.configbytes("remotefilelog", "cachelimit") | |
364 | if size > limit: |
|
364 | if size > limit: | |
365 | excess = size - limit |
|
365 | excess = size - limit | |
366 | removedexcess = 0 |
|
366 | removedexcess = 0 | |
367 | while queue and size > limit and size > 0: |
|
367 | while queue and size > limit and size > 0: | |
368 | ui.progress(_truncating, removedexcess, unit="bytes", |
|
368 | ui.progress(_truncating, removedexcess, unit="bytes", | |
369 | total=excess) |
|
369 | total=excess) | |
370 | atime, oldpath, oldpathstat = queue.get() |
|
370 | atime, oldpath, oldpathstat = queue.get() | |
371 | try: |
|
371 | try: | |
372 | shallowutil.unlinkfile(oldpath) |
|
372 | shallowutil.unlinkfile(oldpath) | |
373 | except OSError as e: |
|
373 | except OSError as e: | |
374 | # errno.ENOENT = no such file or directory |
|
374 | # errno.ENOENT = no such file or directory | |
375 | if e.errno != errno.ENOENT: |
|
375 | if e.errno != errno.ENOENT: | |
376 | raise |
|
376 | raise | |
377 | msg = _("warning: file %s was removed by another process\n") |
|
377 | msg = _("warning: file %s was removed by another process\n") | |
378 | ui.warn(msg % oldpath) |
|
378 | ui.warn(msg % oldpath) | |
379 | size -= oldpathstat.st_size |
|
379 | size -= oldpathstat.st_size | |
380 | removed += 1 |
|
380 | removed += 1 | |
381 | removedexcess += oldpathstat.st_size |
|
381 | removedexcess += oldpathstat.st_size | |
382 | ui.progress(_truncating, None) |
|
382 | ui.progress(_truncating, None) | |
383 |
|
383 | |||
384 | ui.status(_("finished: removed %s of %s files (%0.2f GB to %0.2f GB)\n") |
|
384 | ui.status(_("finished: removed %s of %s files (%0.2f GB to %0.2f GB)\n") | |
385 | % (removed, count, |
|
385 | % (removed, count, | |
386 | float(originalsize) / 1024.0 / 1024.0 / 1024.0, |
|
386 | float(originalsize) / 1024.0 / 1024.0 / 1024.0, | |
387 | float(size) / 1024.0 / 1024.0 / 1024.0)) |
|
387 | float(size) / 1024.0 / 1024.0 / 1024.0)) | |
388 |
|
388 | |||
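# The eviction policy above in miniature: surviving files are queued by
# access time, then the least recently used are removed until the cache
# fits under the configured byte limit. A standalone sketch of that
# second phase (names are hypothetical):
def _evict(entries, limit):
    """entries: list of (atime, path, size) tuples; returns paths to delete."""
    import heapq
    size = sum(entry[2] for entry in entries)
    heapq.heapify(entries)  # smallest (oldest) atime pops first
    doomed = []
    while entries and size > limit:
        atime, path, entrysize = heapq.heappop(entries)
        doomed.append(path)
        size -= entrysize
    return doomed

# For example, _evict([(1, 'a', 600), (2, 'b', 300), (3, 'c', 200)], 500)
# removes only 'a': dropping the oldest 600 bytes already reaches the limit.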
389 | class baseunionstore(object): |
|
389 | class baseunionstore(object): | |
390 | def __init__(self, *args, **kwargs): |
|
390 | def __init__(self, *args, **kwargs): | |
391 | # If one of the functions that iterates all of the stores is about to |
|
391 | # If one of the functions that iterates all of the stores is about to | |
392 | # throw a KeyError, try this many times with a full refresh between |
|
392 | # throw a KeyError, try this many times with a full refresh between | |
393 | # attempts. A repack operation may have moved data from one store to |
|
393 | # attempts. A repack operation may have moved data from one store to | |
394 | # another while we were running. |
|
394 | # another while we were running. | |
395 | self.numattempts = kwargs.get('numretries', 0) + 1 |
|
395 | self.numattempts = kwargs.get(r'numretries', 0) + 1 | |
396 | # If not None, this function is called on every retry and once the |
|
396 | # If not None, this function is called on every retry and once the | |
397 | # attempts are exhausted. |
|
397 | # attempts are exhausted. | |
398 | self.retrylog = kwargs.get('retrylog', None) |
|
398 | self.retrylog = kwargs.get(r'retrylog', None) | |
399 |
|
399 | |||
400 | def markforrefresh(self): |
|
400 | def markforrefresh(self): | |
401 | for store in self.stores: |
|
401 | for store in self.stores: | |
402 | if util.safehasattr(store, 'markforrefresh'): |
|
402 | if util.safehasattr(store, 'markforrefresh'): | |
403 | store.markforrefresh() |
|
403 | store.markforrefresh() | |
404 |
|
404 | |||
405 | @staticmethod |
|
405 | @staticmethod | |
406 | def retriable(fn): |
|
406 | def retriable(fn): | |
407 | def noop(*args): |
|
407 | def noop(*args): | |
408 | pass |
|
408 | pass | |
409 | def wrapped(self, *args, **kwargs): |
|
409 | def wrapped(self, *args, **kwargs): | |
410 | retrylog = self.retrylog or noop |
|
410 | retrylog = self.retrylog or noop | |
411 | funcname = fn.__name__ |
|
411 | funcname = fn.__name__ | |
412 | for i in pycompat.xrange(self.numattempts): |
|
412 | for i in pycompat.xrange(self.numattempts): | |
413 | if i > 0: |
|
413 | if i > 0: | |
414 | retrylog('re-attempting (n=%d) %s\n' % (i, funcname)) |
|
414 | retrylog('re-attempting (n=%d) %s\n' % (i, funcname)) | |
415 | self.markforrefresh() |
|
415 | self.markforrefresh() | |
416 | try: |
|
416 | try: | |
417 | return fn(self, *args, **kwargs) |
|
417 | return fn(self, *args, **kwargs) | |
418 | except KeyError: |
|
418 | except KeyError: | |
419 | pass |
|
419 | pass | |
420 | # retries exhausted |
|
420 | # retries exhausted | |
421 | retrylog('retries exhausted in %s, raising KeyError\n' % funcname) |
|
421 | retrylog('retries exhausted in %s, raising KeyError\n' % funcname) | |
422 | raise |
|
422 | raise | |
423 | return wrapped |
|
423 | return wrapped |
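# A toy illustration of the retry contract above: a union store whose
# lookup succeeds only after markforrefresh() has run, mimicking data that
# a concurrent repack moved between stores mid-operation. The class and
# its names are hypothetical, for demonstration only.
class _toystore(baseunionstore):
    def __init__(self):
        super(_toystore, self).__init__(numretries=1)
        self.stores = []
        self.refreshed = False

    def markforrefresh(self):
        self.refreshed = True

    @baseunionstore.retriable
    def lookup(self, key):
        if not self.refreshed:
            raise KeyError(key)
        return 'value for %s' % key

# _toystore().lookup('x') raises KeyError on the first attempt, refreshes,
# then returns normally on the retry instead of propagating the error.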
@@ -1,376 +1,376 | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | import threading |
|
3 | import threading | |
4 |
|
4 | |||
5 | from mercurial.node import hex, nullid |
|
5 | from mercurial.node import hex, nullid | |
6 | from mercurial import ( |
|
6 | from mercurial import ( | |
7 | mdiff, |
|
7 | mdiff, | |
8 | pycompat, |
|
8 | pycompat, | |
9 | revlog, |
|
9 | revlog, | |
10 | ) |
|
10 | ) | |
11 | from . import ( |
|
11 | from . import ( | |
12 | basestore, |
|
12 | basestore, | |
13 | constants, |
|
13 | constants, | |
14 | shallowutil, |
|
14 | shallowutil, | |
15 | ) |
|
15 | ) | |
16 |
|
16 | |||
17 | class ChainIndicies(object): |
|
17 | class ChainIndicies(object): | |
18 | """A static class for easy reference to the delta chain indicies. |
|
18 | """A static class for easy reference to the delta chain indicies. | |
19 | """ |
|
19 | """ | |
20 | # The filename of this revision delta |
|
20 | # The filename of this revision delta | |
21 | NAME = 0 |
|
21 | NAME = 0 | |
22 | # The mercurial file node for this revision delta |
|
22 | # The mercurial file node for this revision delta | |
23 | NODE = 1 |
|
23 | NODE = 1 | |
24 | # The filename of the delta base's revision. This is useful when deltaing |
|
24 | # The filename of the delta base's revision. This is useful when deltaing | |
25 | # between different files (as in a move or copy, where we can delta |
|
25 | # between different files (as in a move or copy, where we can delta | |
26 | # against the original file content). |
|
26 | # against the original file content). | |
27 | BASENAME = 2 |
|
27 | BASENAME = 2 | |
28 | # The mercurial file node for the delta base revision. This is the nullid if |
|
28 | # The mercurial file node for the delta base revision. This is the nullid if | |
29 | # this delta is a full text. |
|
29 | # this delta is a full text. | |
30 | BASENODE = 3 |
|
30 | BASENODE = 3 | |
31 | # The actual delta or full text data. |
|
31 | # The actual delta or full text data. | |
32 | DATA = 4 |
|
32 | DATA = 4 | |
33 |
|
33 | |||
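# For reference, a chain entry is a plain 5-tuple, so the indices above
# are used like this (values illustrative):
#
#   entry = ('foo.py', node, 'foo.py', nullid, fulltextdata)
#   entry[ChainIndicies.BASENODE] == nullid   # entry carries a full text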
34 | class unioncontentstore(basestore.baseunionstore): |
|
34 | class unioncontentstore(basestore.baseunionstore): | |
35 | def __init__(self, *args, **kwargs): |
|
35 | def __init__(self, *args, **kwargs): | |
36 | super(unioncontentstore, self).__init__(*args, **kwargs) |
|
36 | super(unioncontentstore, self).__init__(*args, **kwargs) | |
37 |
|
37 | |||
38 | self.stores = args |
|
38 | self.stores = args | |
39 | self.writestore = kwargs.get('writestore') |
|
39 | self.writestore = kwargs.get(r'writestore') | |
40 |
|
40 | |||
41 | # If allowincomplete==True then the union store can return partial |
|
41 | # If allowincomplete==True then the union store can return partial | |
42 | # delta chains; otherwise it will throw a KeyError if a full |
|
42 | # delta chains; otherwise it will throw a KeyError if a full | |
43 | # deltachain can't be found. |
|
43 | # deltachain can't be found. | |
44 | self.allowincomplete = kwargs.get('allowincomplete', False) |
|
44 | self.allowincomplete = kwargs.get(r'allowincomplete', False) | |
45 |
|
45 | |||
46 | def get(self, name, node): |
|
46 | def get(self, name, node): | |
47 | """Fetches the full text revision contents of the given name+node pair. |
|
47 | """Fetches the full text revision contents of the given name+node pair. | |
48 | If the full text doesn't exist, throws a KeyError. |
|
48 | If the full text doesn't exist, throws a KeyError. | |
49 |
|
49 | |||
50 | Under the hood, this uses getdeltachain() across all the stores to build |
|
50 | Under the hood, this uses getdeltachain() across all the stores to build | |
51 | up a full chain to produce the full text. |
|
51 | up a full chain to produce the full text. | |
52 | """ |
|
52 | """ | |
53 | chain = self.getdeltachain(name, node) |
|
53 | chain = self.getdeltachain(name, node) | |
54 |
|
54 | |||
55 | if chain[-1][ChainIndicies.BASENODE] != nullid: |
|
55 | if chain[-1][ChainIndicies.BASENODE] != nullid: | |
56 | # If we didn't receive a full chain, throw |
|
56 | # If we didn't receive a full chain, throw | |
57 | raise KeyError((name, hex(node))) |
|
57 | raise KeyError((name, hex(node))) | |
58 |
|
58 | |||
59 | # The last entry in the chain is a full text, so we start our delta |
|
59 | # The last entry in the chain is a full text, so we start our delta | |
60 | # application with that. |
|
60 | # application with that. | |
61 | fulltext = chain.pop()[ChainIndicies.DATA] |
|
61 | fulltext = chain.pop()[ChainIndicies.DATA] | |
62 |
|
62 | |||
63 | text = fulltext |
|
63 | text = fulltext | |
64 | while chain: |
|
64 | while chain: | |
65 | delta = chain.pop()[ChainIndicies.DATA] |
|
65 | delta = chain.pop()[ChainIndicies.DATA] | |
66 | text = mdiff.patches(text, [delta]) |
|
66 | text = mdiff.patches(text, [delta]) | |
67 |
|
67 | |||
68 | return text |
|
68 | return text | |
69 |
|
69 | |||
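# The loop above relies on mdiff deltas composing cleanly; a quick round
# trip showing the primitive it builds on (kept as a comment so nothing
# runs at import time):
#
#   from mercurial import mdiff
#   old, new = 'line1\nline2\n', 'line1\nline2 changed\n'
#   delta = mdiff.textdiff(old, new)
#   assert mdiff.patches(old, [delta]) == new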
70 | @basestore.baseunionstore.retriable |
|
70 | @basestore.baseunionstore.retriable | |
71 | def getdelta(self, name, node): |
|
71 | def getdelta(self, name, node): | |
72 | """Return the single delta entry for the given name/node pair. |
|
72 | """Return the single delta entry for the given name/node pair. | |
73 | """ |
|
73 | """ | |
74 | for store in self.stores: |
|
74 | for store in self.stores: | |
75 | try: |
|
75 | try: | |
76 | return store.getdelta(name, node) |
|
76 | return store.getdelta(name, node) | |
77 | except KeyError: |
|
77 | except KeyError: | |
78 | pass |
|
78 | pass | |
79 |
|
79 | |||
80 | raise KeyError((name, hex(node))) |
|
80 | raise KeyError((name, hex(node))) | |
81 |
|
81 | |||
82 | def getdeltachain(self, name, node): |
|
82 | def getdeltachain(self, name, node): | |
83 | """Returns the deltachain for the given name/node pair. |
|
83 | """Returns the deltachain for the given name/node pair. | |
84 |
|
84 | |||
85 | Returns an ordered list of: |
|
85 | Returns an ordered list of: | |
86 |
|
86 | |||
87 | [(name, node, deltabasename, deltabasenode, deltacontent),...] |
|
87 | [(name, node, deltabasename, deltabasenode, deltacontent),...] | |
88 |
|
88 | |||
89 | where the chain is terminated by a full text entry with a nullid |
|
89 | where the chain is terminated by a full text entry with a nullid | |
90 | deltabasenode. |
|
90 | deltabasenode. | |
91 | """ |
|
91 | """ | |
92 | chain = self._getpartialchain(name, node) |
|
92 | chain = self._getpartialchain(name, node) | |
93 | while chain[-1][ChainIndicies.BASENODE] != nullid: |
|
93 | while chain[-1][ChainIndicies.BASENODE] != nullid: | |
94 | x, x, deltabasename, deltabasenode, x = chain[-1] |
|
94 | x, x, deltabasename, deltabasenode, x = chain[-1] | |
95 | try: |
|
95 | try: | |
96 | morechain = self._getpartialchain(deltabasename, deltabasenode) |
|
96 | morechain = self._getpartialchain(deltabasename, deltabasenode) | |
97 | chain.extend(morechain) |
|
97 | chain.extend(morechain) | |
98 | except KeyError: |
|
98 | except KeyError: | |
99 | # If we allow incomplete chains, don't throw. |
|
99 | # If we allow incomplete chains, don't throw. | |
100 | if not self.allowincomplete: |
|
100 | if not self.allowincomplete: | |
101 | raise |
|
101 | raise | |
102 | break |
|
102 | break | |
103 |
|
103 | |||
104 | return chain |
|
104 | return chain | |
105 |
|
105 | |||
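# Stitching in practice: if a local pack holds only the deltas
#   [(f, n3, f, n2, d3), (f, n2, f, n1, d2)]
# and the shared cache holds the full text
#   [(f, n1, f, nullid, t1)]
# the loop above concatenates the partial chains into a single chain that
# ends at the nullid-based full-text entry.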
106 | @basestore.baseunionstore.retriable |
|
106 | @basestore.baseunionstore.retriable | |
107 | def getmeta(self, name, node): |
|
107 | def getmeta(self, name, node): | |
108 | """Returns the metadata dict for given node.""" |
|
108 | """Returns the metadata dict for given node.""" | |
109 | for store in self.stores: |
|
109 | for store in self.stores: | |
110 | try: |
|
110 | try: | |
111 | return store.getmeta(name, node) |
|
111 | return store.getmeta(name, node) | |
112 | except KeyError: |
|
112 | except KeyError: | |
113 | pass |
|
113 | pass | |
114 | raise KeyError((name, hex(node))) |
|
114 | raise KeyError((name, hex(node))) | |
115 |
|
115 | |||
116 | def getmetrics(self): |
|
116 | def getmetrics(self): | |
117 | metrics = [s.getmetrics() for s in self.stores] |
|
117 | metrics = [s.getmetrics() for s in self.stores] | |
118 | return shallowutil.sumdicts(*metrics) |
|
118 | return shallowutil.sumdicts(*metrics) | |
119 |
|
119 | |||
120 | @basestore.baseunionstore.retriable |
|
120 | @basestore.baseunionstore.retriable | |
121 | def _getpartialchain(self, name, node): |
|
121 | def _getpartialchain(self, name, node): | |
122 | """Returns a partial delta chain for the given name/node pair. |
|
122 | """Returns a partial delta chain for the given name/node pair. | |
123 |
|
123 | |||
124 | A partial chain is a chain that may not be terminated in a full-text. |
|
124 | A partial chain is a chain that may not be terminated in a full-text. | |
125 | """ |
|
125 | """ | |
126 | for store in self.stores: |
|
126 | for store in self.stores: | |
127 | try: |
|
127 | try: | |
128 | return store.getdeltachain(name, node) |
|
128 | return store.getdeltachain(name, node) | |
129 | except KeyError: |
|
129 | except KeyError: | |
130 | pass |
|
130 | pass | |
131 |
|
131 | |||
132 | raise KeyError((name, hex(node))) |
|
132 | raise KeyError((name, hex(node))) | |
133 |
|
133 | |||
134 | def add(self, name, node, data): |
|
134 | def add(self, name, node, data): | |
135 | raise RuntimeError("cannot add content only to remotefilelog " |
|
135 | raise RuntimeError("cannot add content only to remotefilelog " | |
136 | "contentstore") |
|
136 | "contentstore") | |
137 |
|
137 | |||
138 | def getmissing(self, keys): |
|
138 | def getmissing(self, keys): | |
139 | missing = keys |
|
139 | missing = keys | |
140 | for store in self.stores: |
|
140 | for store in self.stores: | |
141 | if missing: |
|
141 | if missing: | |
142 | missing = store.getmissing(missing) |
|
142 | missing = store.getmissing(missing) | |
143 | return missing |
|
143 | return missing | |
144 |
|
144 | |||
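# getmissing threads the shrinking key list through every store, so the
# result is whatever no store can supply; e.g. with two stores:
#
#   store1.getmissing([k1, k2, k3]) -> [k2, k3]   (store1 has k1)
#   store2.getmissing([k2, k3])     -> [k3]       (store2 has k2)
#
# leaving [k3] as missing everywhere.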
145 | def addremotefilelognode(self, name, node, data): |
|
145 | def addremotefilelognode(self, name, node, data): | |
146 | if self.writestore: |
|
146 | if self.writestore: | |
147 | self.writestore.addremotefilelognode(name, node, data) |
|
147 | self.writestore.addremotefilelognode(name, node, data) | |
148 | else: |
|
148 | else: | |
149 | raise RuntimeError("no writable store configured") |
|
149 | raise RuntimeError("no writable store configured") | |
150 |
|
150 | |||
151 | def markledger(self, ledger, options=None): |
|
151 | def markledger(self, ledger, options=None): | |
152 | for store in self.stores: |
|
152 | for store in self.stores: | |
153 | store.markledger(ledger, options) |
|
153 | store.markledger(ledger, options) | |
154 |
|
154 | |||
155 | class remotefilelogcontentstore(basestore.basestore): |
|
155 | class remotefilelogcontentstore(basestore.basestore): | |
156 | def __init__(self, *args, **kwargs): |
|
156 | def __init__(self, *args, **kwargs): | |
157 | super(remotefilelogcontentstore, self).__init__(*args, **kwargs) |
|
157 | super(remotefilelogcontentstore, self).__init__(*args, **kwargs) | |
158 | self._threaddata = threading.local() |
|
158 | self._threaddata = threading.local() | |
159 |
|
159 | |||
160 | def get(self, name, node): |
|
160 | def get(self, name, node): | |
161 | # return raw revision text |
|
161 | # return raw revision text | |
162 | data = self._getdata(name, node) |
|
162 | data = self._getdata(name, node) | |
163 |
|
163 | |||
164 | offset, size, flags = shallowutil.parsesizeflags(data) |
|
164 | offset, size, flags = shallowutil.parsesizeflags(data) | |
165 | content = data[offset:offset + size] |
|
165 | content = data[offset:offset + size] | |
166 |
|
166 | |||
167 | ancestormap = shallowutil.ancestormap(data) |
|
167 | ancestormap = shallowutil.ancestormap(data) | |
168 | p1, p2, linknode, copyfrom = ancestormap[node] |
|
168 | p1, p2, linknode, copyfrom = ancestormap[node] | |
169 | copyrev = None |
|
169 | copyrev = None | |
170 | if copyfrom: |
|
170 | if copyfrom: | |
171 | copyrev = hex(p1) |
|
171 | copyrev = hex(p1) | |
172 |
|
172 | |||
173 | self._updatemetacache(node, size, flags) |
|
173 | self._updatemetacache(node, size, flags) | |
174 |
|
174 | |||
175 | # lfs tracks renames in its own metadata, so remove the hg copy metadata; |
|
175 | # lfs tracks renames in its own metadata, so remove the hg copy metadata; | |
176 | # it will be re-added by the lfs flag processor. |
|
176 | # it will be re-added by the lfs flag processor. | |
177 | if flags & revlog.REVIDX_EXTSTORED: |
|
177 | if flags & revlog.REVIDX_EXTSTORED: | |
178 | copyrev = copyfrom = None |
|
178 | copyrev = copyfrom = None | |
179 | revision = shallowutil.createrevlogtext(content, copyfrom, copyrev) |
|
179 | revision = shallowutil.createrevlogtext(content, copyfrom, copyrev) | |
180 | return revision |
|
180 | return revision | |
181 |
|
181 | |||
182 | def getdelta(self, name, node): |
|
182 | def getdelta(self, name, node): | |
183 | # Since remotefilelog content stores only contain full texts, just |
|
183 | # Since remotefilelog content stores only contain full texts, just | |
184 | # return that. |
|
184 | # return that. | |
185 | revision = self.get(name, node) |
|
185 | revision = self.get(name, node) | |
186 | return revision, name, nullid, self.getmeta(name, node) |
|
186 | return revision, name, nullid, self.getmeta(name, node) | |
187 |
|
187 | |||
188 | def getdeltachain(self, name, node): |
|
188 | def getdeltachain(self, name, node): | |
189 | # Since remotefilelog content stores just contain full texts, we return |
|
189 | # Since remotefilelog content stores just contain full texts, we return | |
190 | # a fake delta chain that just consists of a single full text revision. |
|
190 | # a fake delta chain that just consists of a single full text revision. | |
191 | # The nullid in the deltabasenode slot indicates that the revision is a |
|
191 | # The nullid in the deltabasenode slot indicates that the revision is a | |
192 | # fulltext. |
|
192 | # fulltext. | |
193 | revision = self.get(name, node) |
|
193 | revision = self.get(name, node) | |
194 | return [(name, node, None, nullid, revision)] |
|
194 | return [(name, node, None, nullid, revision)] | |
195 |
|
195 | |||
196 | def getmeta(self, name, node): |
|
196 | def getmeta(self, name, node): | |
197 | self._sanitizemetacache() |
|
197 | self._sanitizemetacache() | |
198 | if node != self._threaddata.metacache[0]: |
|
198 | if node != self._threaddata.metacache[0]: | |
199 | data = self._getdata(name, node) |
|
199 | data = self._getdata(name, node) | |
200 | offset, size, flags = shallowutil.parsesizeflags(data) |
|
200 | offset, size, flags = shallowutil.parsesizeflags(data) | |
201 | self._updatemetacache(node, size, flags) |
|
201 | self._updatemetacache(node, size, flags) | |
202 | return self._threaddata.metacache[1] |
|
202 | return self._threaddata.metacache[1] | |
203 |
|
203 | |||
204 | def add(self, name, node, data): |
|
204 | def add(self, name, node, data): | |
205 | raise RuntimeError("cannot add content only to remotefilelog " |
|
205 | raise RuntimeError("cannot add content only to remotefilelog " | |
206 | "contentstore") |
|
206 | "contentstore") | |
207 |
|
207 | |||
208 | def _sanitizemetacache(self): |
|
208 | def _sanitizemetacache(self): | |
209 | metacache = getattr(self._threaddata, 'metacache', None) |
|
209 | metacache = getattr(self._threaddata, 'metacache', None) | |
210 | if metacache is None: |
|
210 | if metacache is None: | |
211 | self._threaddata.metacache = (None, None) # (node, meta) |
|
211 | self._threaddata.metacache = (None, None) # (node, meta) | |
212 |
|
212 | |||
213 | def _updatemetacache(self, node, size, flags): |
|
213 | def _updatemetacache(self, node, size, flags): | |
214 | self._sanitizemetacache() |
|
214 | self._sanitizemetacache() | |
215 | if node == self._threaddata.metacache[0]: |
|
215 | if node == self._threaddata.metacache[0]: | |
216 | return |
|
216 | return | |
217 | meta = {constants.METAKEYFLAG: flags, |
|
217 | meta = {constants.METAKEYFLAG: flags, | |
218 | constants.METAKEYSIZE: size} |
|
218 | constants.METAKEYSIZE: size} | |
219 | self._threaddata.metacache = (node, meta) |
|
219 | self._threaddata.metacache = (node, meta) | |
220 |
|
220 | |||
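# The metacache above is a one-entry, per-thread memo: each thread
# remembers only the last (node, meta) pair it computed. The same pattern
# as a standalone sketch (reusing this module's threading import; the
# class name is hypothetical):
class _oneentrycache(object):
    def __init__(self, compute):
        self._compute = compute
        self._local = threading.local()

    def get(self, key):
        cached = getattr(self._local, 'entry', (None, None))
        if cached[0] != key:
            cached = (key, self._compute(key))
            self._local.entry = cached
        return cached[1]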
221 | class remotecontentstore(object): |
|
221 | class remotecontentstore(object): | |
222 | def __init__(self, ui, fileservice, shared): |
|
222 | def __init__(self, ui, fileservice, shared): | |
223 | self._fileservice = fileservice |
|
223 | self._fileservice = fileservice | |
224 | # type(shared) is usually remotefilelogcontentstore |
|
224 | # type(shared) is usually remotefilelogcontentstore | |
225 | self._shared = shared |
|
225 | self._shared = shared | |
226 |
|
226 | |||
227 | def get(self, name, node): |
|
227 | def get(self, name, node): | |
228 | self._fileservice.prefetch([(name, hex(node))], force=True, |
|
228 | self._fileservice.prefetch([(name, hex(node))], force=True, | |
229 | fetchdata=True) |
|
229 | fetchdata=True) | |
230 | return self._shared.get(name, node) |
|
230 | return self._shared.get(name, node) | |
231 |
|
231 | |||
232 | def getdelta(self, name, node): |
|
232 | def getdelta(self, name, node): | |
233 | revision = self.get(name, node) |
|
233 | revision = self.get(name, node) | |
234 | return revision, name, nullid, self._shared.getmeta(name, node) |
|
234 | return revision, name, nullid, self._shared.getmeta(name, node) | |
235 |
|
235 | |||
236 | def getdeltachain(self, name, node): |
|
236 | def getdeltachain(self, name, node): | |
237 | # Since our remote content stores just contain full texts, we return a |
|
237 | # Since our remote content stores just contain full texts, we return a | |
238 | # fake delta chain that just consists of a single full text revision. |
|
238 | # fake delta chain that just consists of a single full text revision. | |
239 | # The nullid in the deltabasenode slot indicates that the revision is a |
|
239 | # The nullid in the deltabasenode slot indicates that the revision is a | |
240 | # fulltext. |
|
240 | # fulltext. | |
241 | revision = self.get(name, node) |
|
241 | revision = self.get(name, node) | |
242 | return [(name, node, None, nullid, revision)] |
|
242 | return [(name, node, None, nullid, revision)] | |
243 |
|
243 | |||
244 | def getmeta(self, name, node): |
|
244 | def getmeta(self, name, node): | |
245 | self._fileservice.prefetch([(name, hex(node))], force=True, |
|
245 | self._fileservice.prefetch([(name, hex(node))], force=True, | |
246 | fetchdata=True) |
|
246 | fetchdata=True) | |
247 | return self._shared.getmeta(name, node) |
|
247 | return self._shared.getmeta(name, node) | |
248 |
|
248 | |||
249 | def add(self, name, node, data): |
|
249 | def add(self, name, node, data): | |
250 | raise RuntimeError("cannot add to a remote store") |
|
250 | raise RuntimeError("cannot add to a remote store") | |
251 |
|
251 | |||
252 | def getmissing(self, keys): |
|
252 | def getmissing(self, keys): | |
253 | return keys |
|
253 | return keys | |
254 |
|
254 | |||
255 | def markledger(self, ledger, options=None): |
|
255 | def markledger(self, ledger, options=None): | |
256 | pass |
|
256 | pass | |
257 |
|
257 | |||
258 | class manifestrevlogstore(object): |
|
258 | class manifestrevlogstore(object): | |
259 | def __init__(self, repo): |
|
259 | def __init__(self, repo): | |
260 | self._store = repo.store |
|
260 | self._store = repo.store | |
261 | self._svfs = repo.svfs |
|
261 | self._svfs = repo.svfs | |
262 | self._revlogs = dict() |
|
262 | self._revlogs = dict() | |
263 | self._cl = revlog.revlog(self._svfs, '00changelog.i') |
|
263 | self._cl = revlog.revlog(self._svfs, '00changelog.i') | |
264 | self._repackstartlinkrev = 0 |
|
264 | self._repackstartlinkrev = 0 | |
265 |
|
265 | |||
266 | def get(self, name, node): |
|
266 | def get(self, name, node): | |
267 | return self._revlog(name).revision(node, raw=True) |
|
267 | return self._revlog(name).revision(node, raw=True) | |
268 |
|
268 | |||
269 | def getdelta(self, name, node): |
|
269 | def getdelta(self, name, node): | |
270 | revision = self.get(name, node) |
|
270 | revision = self.get(name, node) | |
271 | return revision, name, nullid, self.getmeta(name, node) |
|
271 | return revision, name, nullid, self.getmeta(name, node) | |
272 |
|
272 | |||
273 | def getdeltachain(self, name, node): |
|
273 | def getdeltachain(self, name, node): | |
274 | revision = self.get(name, node) |
|
274 | revision = self.get(name, node) | |
275 | return [(name, node, None, nullid, revision)] |
|
275 | return [(name, node, None, nullid, revision)] | |
276 |
|
276 | |||
277 | def getmeta(self, name, node): |
|
277 | def getmeta(self, name, node): | |
278 | rl = self._revlog(name) |
|
278 | rl = self._revlog(name) | |
279 | rev = rl.rev(node) |
|
279 | rev = rl.rev(node) | |
280 | return {constants.METAKEYFLAG: rl.flags(rev), |
|
280 | return {constants.METAKEYFLAG: rl.flags(rev), | |
281 | constants.METAKEYSIZE: rl.rawsize(rev)} |
|
281 | constants.METAKEYSIZE: rl.rawsize(rev)} | |
282 |
|
282 | |||
283 | def getancestors(self, name, node, known=None): |
|
283 | def getancestors(self, name, node, known=None): | |
284 | if known is None: |
|
284 | if known is None: | |
285 | known = set() |
|
285 | known = set() | |
286 | if node in known: |
|
286 | if node in known: | |
287 | return [] |
|
287 | return [] | |
288 |
|
288 | |||
289 | rl = self._revlog(name) |
|
289 | rl = self._revlog(name) | |
290 | ancestors = {} |
|
290 | ancestors = {} | |
291 | missing = set((node,)) |
|
291 | missing = set((node,)) | |
292 | for ancrev in rl.ancestors([rl.rev(node)], inclusive=True): |
|
292 | for ancrev in rl.ancestors([rl.rev(node)], inclusive=True): | |
293 | ancnode = rl.node(ancrev) |
|
293 | ancnode = rl.node(ancrev) | |
294 | missing.discard(ancnode) |
|
294 | missing.discard(ancnode) | |
295 |
|
295 | |||
296 | p1, p2 = rl.parents(ancnode) |
|
296 | p1, p2 = rl.parents(ancnode) | |
297 | if p1 != nullid and p1 not in known: |
|
297 | if p1 != nullid and p1 not in known: | |
298 | missing.add(p1) |
|
298 | missing.add(p1) | |
299 | if p2 != nullid and p2 not in known: |
|
299 | if p2 != nullid and p2 not in known: | |
300 | missing.add(p2) |
|
300 | missing.add(p2) | |
301 |
|
301 | |||
302 | linknode = self._cl.node(rl.linkrev(ancrev)) |
|
302 | linknode = self._cl.node(rl.linkrev(ancrev)) | |
303 | ancestors[rl.node(ancrev)] = (p1, p2, linknode, '') |
|
303 | ancestors[rl.node(ancrev)] = (p1, p2, linknode, '') | |
304 | if not missing: |
|
304 | if not missing: | |
305 | break |
|
305 | break | |
306 | return ancestors |
|
306 | return ancestors | |
307 |
|
307 | |||
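# The walk above, distilled: visit ancestors until every node we still owe
# an entry for has been seen, queueing unknown parents along the way. A toy
# version (not a drop-in) over a plain {node: (p1, p2)} parent map, where
# None marks a missing parent:
def _collectancestors(parents, node, known):
    ancestors = {}
    missing = set([node])
    stack = [node]
    while stack and missing:
        anc = stack.pop()
        if anc in ancestors:
            continue
        missing.discard(anc)
        p1, p2 = parents.get(anc, (None, None))
        for p in (p1, p2):
            if p is not None:
                stack.append(p)
                if p not in known:
                    missing.add(p)
        ancestors[anc] = (p1, p2)
    return ancestors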
308 | def getnodeinfo(self, name, node): |
|
308 | def getnodeinfo(self, name, node): | |
309 | cl = self._cl |
|
309 | cl = self._cl | |
310 | rl = self._revlog(name) |
|
310 | rl = self._revlog(name) | |
311 | parents = rl.parents(node) |
|
311 | parents = rl.parents(node) | |
312 | linkrev = rl.linkrev(rl.rev(node)) |
|
312 | linkrev = rl.linkrev(rl.rev(node)) | |
313 | return (parents[0], parents[1], cl.node(linkrev), None) |
|
313 | return (parents[0], parents[1], cl.node(linkrev), None) | |
314 |
|
314 | |||
315 | def add(self, *args): |
|
315 | def add(self, *args): | |
316 | raise RuntimeError("cannot add to a revlog store") |
|
316 | raise RuntimeError("cannot add to a revlog store") | |
317 |
|
317 | |||
318 | def _revlog(self, name): |
|
318 | def _revlog(self, name): | |
319 | rl = self._revlogs.get(name) |
|
319 | rl = self._revlogs.get(name) | |
320 | if rl is None: |
|
320 | if rl is None: | |
321 | revlogname = '00manifesttree.i' |
|
321 | revlogname = '00manifesttree.i' | |
322 | if name != '': |
|
322 | if name != '': | |
323 | revlogname = 'meta/%s/00manifest.i' % name |
|
323 | revlogname = 'meta/%s/00manifest.i' % name | |
324 | rl = revlog.revlog(self._svfs, revlogname) |
|
324 | rl = revlog.revlog(self._svfs, revlogname) | |
325 | self._revlogs[name] = rl |
|
325 | self._revlogs[name] = rl | |
326 | return rl |
|
326 | return rl | |
327 |
|
327 | |||
328 | def getmissing(self, keys): |
|
328 | def getmissing(self, keys): | |
329 | missing = [] |
|
329 | missing = [] | |
330 | for name, node in keys: |
|
330 | for name, node in keys: | |
331 | mfrevlog = self._revlog(name) |
|
331 | mfrevlog = self._revlog(name) | |
332 | if node not in mfrevlog.nodemap: |
|
332 | if node not in mfrevlog.nodemap: | |
333 | missing.append((name, node)) |
|
333 | missing.append((name, node)) | |
334 |
|
334 | |||
335 | return missing |
|
335 | return missing | |
336 |
|
336 | |||
337 | def setrepacklinkrevrange(self, startrev, endrev): |
|
337 | def setrepacklinkrevrange(self, startrev, endrev): | |
338 | self._repackstartlinkrev = startrev |
|
338 | self._repackstartlinkrev = startrev | |
339 | self._repackendlinkrev = endrev |
|
339 | self._repackendlinkrev = endrev | |
340 |
|
340 | |||
341 | def markledger(self, ledger, options=None): |
|
341 | def markledger(self, ledger, options=None): | |
342 | if options and options.get(constants.OPTION_PACKSONLY): |
|
342 | if options and options.get(constants.OPTION_PACKSONLY): | |
343 | return |
|
343 | return | |
344 | treename = '' |
|
344 | treename = '' | |
345 | rl = revlog.revlog(self._svfs, '00manifesttree.i') |
|
345 | rl = revlog.revlog(self._svfs, '00manifesttree.i') | |
346 | startlinkrev = self._repackstartlinkrev |
|
346 | startlinkrev = self._repackstartlinkrev | |
347 | endlinkrev = self._repackendlinkrev |
|
347 | endlinkrev = self._repackendlinkrev | |
348 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): |
|
348 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): | |
349 | linkrev = rl.linkrev(rev) |
|
349 | linkrev = rl.linkrev(rev) | |
350 | if linkrev < startlinkrev: |
|
350 | if linkrev < startlinkrev: | |
351 | break |
|
351 | break | |
352 | if linkrev > endlinkrev: |
|
352 | if linkrev > endlinkrev: | |
353 | continue |
|
353 | continue | |
354 | node = rl.node(rev) |
|
354 | node = rl.node(rev) | |
355 | ledger.markdataentry(self, treename, node) |
|
355 | ledger.markdataentry(self, treename, node) | |
356 | ledger.markhistoryentry(self, treename, node) |
|
356 | ledger.markhistoryentry(self, treename, node) | |
357 |
|
357 | |||
358 | for path, encoded, size in self._store.datafiles(): |
|
358 | for path, encoded, size in self._store.datafiles(): | |
359 | if path[:5] != 'meta/' or path[-2:] != '.i': |
|
359 | if path[:5] != 'meta/' or path[-2:] != '.i': | |
360 | continue |
|
360 | continue | |
361 |
|
361 | |||
362 | treename = path[5:-len('/00manifest.i')] |
|
362 | treename = path[5:-len('/00manifest.i')] | |
363 |
|
363 | |||
364 | rl = revlog.revlog(self._svfs, path) |
|
364 | rl = revlog.revlog(self._svfs, path) | |
365 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): |
|
365 | for rev in pycompat.xrange(len(rl) - 1, -1, -1): | |
366 | linkrev = rl.linkrev(rev) |
|
366 | linkrev = rl.linkrev(rev) | |
367 | if linkrev < startlinkrev: |
|
367 | if linkrev < startlinkrev: | |
368 | break |
|
368 | break | |
369 | if linkrev > endlinkrev: |
|
369 | if linkrev > endlinkrev: | |
370 | continue |
|
370 | continue | |
371 | node = rl.node(rev) |
|
371 | node = rl.node(rev) | |
372 | ledger.markdataentry(self, treename, node) |
|
372 | ledger.markdataentry(self, treename, node) | |
373 | ledger.markhistoryentry(self, treename, node) |
|
373 | ledger.markhistoryentry(self, treename, node) | |
374 |
|
374 | |||
375 | def cleanup(self, ledger): |
|
375 | def cleanup(self, ledger): | |
376 | pass |
|
376 | pass |
@@ -1,377 +1,377 | |||||
1 | # debugcommands.py - debug logic for remotefilelog |
|
1 | # debugcommands.py - debug logic for remotefilelog | |
2 | # |
|
2 | # | |
3 | # Copyright 2013 Facebook, Inc. |
|
3 | # Copyright 2013 Facebook, Inc. | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 | from __future__ import absolute_import |
|
7 | from __future__ import absolute_import | |
8 |
|
8 | |||
9 | import hashlib |
|
9 | import hashlib | |
10 | import os |
|
10 | import os | |
11 | import zlib |
|
11 | import zlib | |
12 |
|
12 | |||
13 | from mercurial.node import bin, hex, nullid, short |
|
13 | from mercurial.node import bin, hex, nullid, short | |
14 | from mercurial.i18n import _ |
|
14 | from mercurial.i18n import _ | |
15 | from mercurial import ( |
|
15 | from mercurial import ( | |
16 | error, |
|
16 | error, | |
17 | filelog, |
|
17 | filelog, | |
18 | revlog, |
|
18 | revlog, | |
19 | ) |
|
19 | ) | |
20 | from . import ( |
|
20 | from . import ( | |
21 | constants, |
|
21 | constants, | |
22 | datapack, |
|
22 | datapack, | |
23 | extutil, |
|
23 | extutil, | |
24 | fileserverclient, |
|
24 | fileserverclient, | |
25 | historypack, |
|
25 | historypack, | |
26 | repack, |
|
26 | repack, | |
27 | shallowutil, |
|
27 | shallowutil, | |
28 | ) |
|
28 | ) | |
29 |
|
29 | |||
30 | def debugremotefilelog(ui, path, **opts): |
|
30 | def debugremotefilelog(ui, path, **opts): | |
31 | decompress = opts.get('decompress') |
|
31 | decompress = opts.get(r'decompress') | |
32 |
|
32 | |||
33 | size, firstnode, mapping = parsefileblob(path, decompress) |
|
33 | size, firstnode, mapping = parsefileblob(path, decompress) | |
34 |
|
34 | |||
35 | ui.status(_("size: %s bytes\n") % (size)) |
|
35 | ui.status(_("size: %s bytes\n") % (size)) | |
36 | ui.status(_("path: %s \n") % (path)) |
|
36 | ui.status(_("path: %s \n") % (path)) | |
37 | ui.status(_("key: %s \n") % (short(firstnode))) |
|
37 | ui.status(_("key: %s \n") % (short(firstnode))) | |
38 | ui.status(_("\n")) |
|
38 | ui.status(_("\n")) | |
39 | ui.status(_("%12s => %12s %13s %13s %12s\n") % |
|
39 | ui.status(_("%12s => %12s %13s %13s %12s\n") % | |
40 | ("node", "p1", "p2", "linknode", "copyfrom")) |
|
40 | ("node", "p1", "p2", "linknode", "copyfrom")) | |
41 |
|
41 | |||
42 | queue = [firstnode] |
|
42 | queue = [firstnode] | |
43 | while queue: |
|
43 | while queue: | |
44 | node = queue.pop(0) |
|
44 | node = queue.pop(0) | |
45 | p1, p2, linknode, copyfrom = mapping[node] |
|
45 | p1, p2, linknode, copyfrom = mapping[node] | |
46 | ui.status(_("%s => %s %s %s %s\n") % |
|
46 | ui.status(_("%s => %s %s %s %s\n") % | |
47 | (short(node), short(p1), short(p2), short(linknode), copyfrom)) |
|
47 | (short(node), short(p1), short(p2), short(linknode), copyfrom)) | |
48 | if p1 != nullid: |
|
48 | if p1 != nullid: | |
49 | queue.append(p1) |
|
49 | queue.append(p1) | |
50 | if p2 != nullid: |
|
50 | if p2 != nullid: | |
51 | queue.append(p2) |
|
51 | queue.append(p2) | |
52 |
|
52 | |||
53 | def buildtemprevlog(repo, file): |
|
53 | def buildtemprevlog(repo, file): | |
54 | # get filename key |
|
54 | # get filename key | |
55 | filekey = hashlib.sha1(file).hexdigest() |
|
55 | filekey = hashlib.sha1(file).hexdigest() | |
56 | filedir = os.path.join(repo.path, 'store/data', filekey) |
|
56 | filedir = os.path.join(repo.path, 'store/data', filekey) | |
57 |
|
57 | |||
58 | # sort all entries based on linkrev |
|
58 | # sort all entries based on linkrev | |
59 | fctxs = [] |
|
59 | fctxs = [] | |
60 | for filenode in os.listdir(filedir): |
|
60 | for filenode in os.listdir(filedir): | |
61 | if '_old' not in filenode: |
|
61 | if '_old' not in filenode: | |
62 | fctxs.append(repo.filectx(file, fileid=bin(filenode))) |
|
62 | fctxs.append(repo.filectx(file, fileid=bin(filenode))) | |
63 |
|
63 | |||
64 | fctxs = sorted(fctxs, key=lambda x: x.linkrev()) |
|
64 | fctxs = sorted(fctxs, key=lambda x: x.linkrev()) | |
65 |
|
65 | |||
66 | # add to revlog |
|
66 | # add to revlog | |
67 | temppath = repo.sjoin('data/temprevlog.i') |
|
67 | temppath = repo.sjoin('data/temprevlog.i') | |
68 | if os.path.exists(temppath): |
|
68 | if os.path.exists(temppath): | |
69 | os.remove(temppath) |
|
69 | os.remove(temppath) | |
70 | r = filelog.filelog(repo.svfs, 'temprevlog') |
|
70 | r = filelog.filelog(repo.svfs, 'temprevlog') | |
71 |
|
71 | |||
72 | class faket(object): |
|
72 | class faket(object): | |
73 | def add(self, a, b, c): |
|
73 | def add(self, a, b, c): | |
74 | pass |
|
74 | pass | |
75 | t = faket() |
|
75 | t = faket() | |
76 | for fctx in fctxs: |
|
76 | for fctx in fctxs: | |
77 | if fctx.node() not in repo: |
|
77 | if fctx.node() not in repo: | |
78 | continue |
|
78 | continue | |
79 |
|
79 | |||
80 | p = fctx.filelog().parents(fctx.filenode()) |
|
80 | p = fctx.filelog().parents(fctx.filenode()) | |
81 | meta = {} |
|
81 | meta = {} | |
82 | if fctx.renamed(): |
|
82 | if fctx.renamed(): | |
83 | meta['copy'] = fctx.renamed()[0] |
|
83 | meta['copy'] = fctx.renamed()[0] | |
84 | meta['copyrev'] = hex(fctx.renamed()[1]) |
|
84 | meta['copyrev'] = hex(fctx.renamed()[1]) | |
85 |
|
85 | |||
86 | r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1]) |
|
86 | r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1]) | |
87 |
|
87 | |||
88 | return r |
|
88 | return r | |
89 |
|
89 | |||
90 | def debugindex(orig, ui, repo, file_=None, **opts): |
|
90 | def debugindex(orig, ui, repo, file_=None, **opts): | |
91 | """dump the contents of an index file""" |
|
91 | """dump the contents of an index file""" | |
92 | if (opts.get('changelog') or |
|
92 | if (opts.get(r'changelog') or | |
93 | opts.get('manifest') or |
|
93 | opts.get(r'manifest') or | |
94 | opts.get('dir') or |
|
94 | opts.get(r'dir') or | |
95 | not shallowutil.isenabled(repo) or |
|
95 | not shallowutil.isenabled(repo) or | |
96 | not repo.shallowmatch(file_)): |
|
96 | not repo.shallowmatch(file_)): | |
97 | return orig(ui, repo, file_, **opts) |
|
97 | return orig(ui, repo, file_, **opts) | |
98 |
|
98 | |||
99 | r = buildtemprevlog(repo, file_) |
|
99 | r = buildtemprevlog(repo, file_) | |
100 |
|
100 | |||
101 | # debugindex like normal |
|
101 | # debugindex like normal | |
102 | format = opts.get('format', 0) |
|
102 | format = opts.get('format', 0) | |
103 | if format not in (0, 1): |
|
103 | if format not in (0, 1): | |
104 | raise error.Abort(_("unknown format %d") % format) |
|
104 | raise error.Abort(_("unknown format %d") % format) | |
105 |
|
105 | |||
106 | generaldelta = r.version & revlog.FLAG_GENERALDELTA |
|
106 | generaldelta = r.version & revlog.FLAG_GENERALDELTA | |
107 | if generaldelta: |
|
107 | if generaldelta: | |
108 | basehdr = ' delta' |
|
108 | basehdr = ' delta' | |
109 | else: |
|
109 | else: | |
110 | basehdr = ' base' |
|
110 | basehdr = ' base' | |
111 |
|
111 | |||
112 | if format == 0: |
|
112 | if format == 0: | |
113 | ui.write((" rev offset length " + basehdr + " linkrev" |
|
113 | ui.write((" rev offset length " + basehdr + " linkrev" | |
114 | " nodeid p1 p2\n")) |
|
114 | " nodeid p1 p2\n")) | |
115 | elif format == 1: |
|
115 | elif format == 1: | |
116 | ui.write((" rev flag offset length" |
|
116 | ui.write((" rev flag offset length" | |
117 | " size " + basehdr + " link p1 p2" |
|
117 | " size " + basehdr + " link p1 p2" | |
118 | " nodeid\n")) |
|
118 | " nodeid\n")) | |
119 |
|
119 | |||
120 | for i in r: |
|
120 | for i in r: | |
121 | node = r.node(i) |
|
121 | node = r.node(i) | |
122 | if generaldelta: |
|
122 | if generaldelta: | |
123 | base = r.deltaparent(i) |
|
123 | base = r.deltaparent(i) | |
124 | else: |
|
124 | else: | |
125 | base = r.chainbase(i) |
|
125 | base = r.chainbase(i) | |
126 | if format == 0: |
|
126 | if format == 0: | |
127 | try: |
|
127 | try: | |
128 | pp = r.parents(node) |
|
128 | pp = r.parents(node) | |
129 | except Exception: |
|
129 | except Exception: | |
130 | pp = [nullid, nullid] |
|
130 | pp = [nullid, nullid] | |
131 | ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % ( |
|
131 | ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % ( | |
132 | i, r.start(i), r.length(i), base, r.linkrev(i), |
|
132 | i, r.start(i), r.length(i), base, r.linkrev(i), | |
133 | short(node), short(pp[0]), short(pp[1]))) |
|
133 | short(node), short(pp[0]), short(pp[1]))) | |
134 | elif format == 1: |
|
134 | elif format == 1: | |
135 | pr = r.parentrevs(i) |
|
135 | pr = r.parentrevs(i) | |
136 | ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( |
|
136 | ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( | |
137 | i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), |
|
137 | i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), | |
138 | base, r.linkrev(i), pr[0], pr[1], short(node))) |
|
138 | base, r.linkrev(i), pr[0], pr[1], short(node))) | |
139 |
|
139 | |||
140 | def debugindexdot(orig, ui, repo, file_): |
|
140 | def debugindexdot(orig, ui, repo, file_): | |
141 | """dump an index DAG as a graphviz dot file""" |
|
141 | """dump an index DAG as a graphviz dot file""" | |
142 | if not shallowutil.isenabled(repo): |
|
142 | if not shallowutil.isenabled(repo): | |
143 | return orig(ui, repo, file_) |
|
143 | return orig(ui, repo, file_) | |
144 |
|
144 | |||
145 | r = buildtemprevlog(repo, os.path.basename(file_)[:-2]) |
|
145 | r = buildtemprevlog(repo, os.path.basename(file_)[:-2]) | |
146 |
|
146 | |||
147 | ui.write(("digraph G {\n")) |
|
147 | ui.write(("digraph G {\n")) | |
148 | for i in r: |
|
148 | for i in r: | |
149 | node = r.node(i) |
|
149 | node = r.node(i) | |
150 | pp = r.parents(node) |
|
150 | pp = r.parents(node) | |
151 | ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
151 | ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) | |
152 | if pp[1] != nullid: |
|
152 | if pp[1] != nullid: | |
153 | ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
153 | ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) | |
154 | ui.write("}\n") |
|
154 | ui.write("}\n") | |
155 |
|
155 | |||
156 | def verifyremotefilelog(ui, path, **opts): |
|
156 | def verifyremotefilelog(ui, path, **opts): | |
157 | decompress = opts.get('decompress') |
|
157 | decompress = opts.get(r'decompress') | |
158 |
|
158 | |||
159 | for root, dirs, files in os.walk(path): |
|
159 | for root, dirs, files in os.walk(path): | |
160 | for file in files: |
|
160 | for file in files: | |
161 | if file == "repos": |
|
161 | if file == "repos": | |
162 | continue |
|
162 | continue | |
163 | filepath = os.path.join(root, file) |
|
163 | filepath = os.path.join(root, file) | |
164 | size, firstnode, mapping = parsefileblob(filepath, decompress) |
|
164 | size, firstnode, mapping = parsefileblob(filepath, decompress) | |
165 | for p1, p2, linknode, copyfrom in mapping.itervalues(): |
|
165 | for p1, p2, linknode, copyfrom in mapping.itervalues(): | |
166 | if linknode == nullid: |
|
166 | if linknode == nullid: | |
167 | actualpath = os.path.relpath(root, path) |
|
167 | actualpath = os.path.relpath(root, path) | |
168 | key = fileserverclient.getcachekey("reponame", actualpath, |
|
168 | key = fileserverclient.getcachekey("reponame", actualpath, | |
169 | file) |
|
169 | file) | |
170 | ui.status("%s %s\n" % (key, os.path.relpath(filepath, |
|
170 | ui.status("%s %s\n" % (key, os.path.relpath(filepath, | |
171 | path))) |
|
171 | path))) | |
172 |
|
172 | |||
173 | def _decompressblob(raw): |
|
173 | def _decompressblob(raw): | |
174 | return zlib.decompress(raw) |
|
174 | return zlib.decompress(raw) | |
175 |
|
175 | |||
176 | def parsefileblob(path, decompress): |
|
176 | def parsefileblob(path, decompress): | |
177 | raw = None |
|
177 | raw = None | |
178 | f = open(path, "r") |
|
178 | f = open(path, "r") | |
179 | try: |
|
179 | try: | |
180 | raw = f.read() |
|
180 | raw = f.read() | |
181 | finally: |
|
181 | finally: | |
182 | f.close() |
|
182 | f.close() | |
183 |
|
183 | |||
184 | if decompress: |
|
184 | if decompress: | |
185 | raw = _decompressblob(raw) |
|
185 | raw = _decompressblob(raw) | |
186 |
|
186 | |||
187 | offset, size, flags = shallowutil.parsesizeflags(raw) |
|
187 | offset, size, flags = shallowutil.parsesizeflags(raw) | |
188 | start = offset + size |
|
188 | start = offset + size | |
189 |
|
189 | |||
190 | firstnode = None |
|
190 | firstnode = None | |
191 |
|
191 | |||
192 | mapping = {} |
|
192 | mapping = {} | |
193 | while start < len(raw): |
|
193 | while start < len(raw): | |
194 | divider = raw.index('\0', start + 80) |
|
194 | divider = raw.index('\0', start + 80) | |
195 |
|
195 | |||
196 | currentnode = raw[start:(start + 20)] |
|
196 | currentnode = raw[start:(start + 20)] | |
197 | if not firstnode: |
|
197 | if not firstnode: | |
198 | firstnode = currentnode |
|
198 | firstnode = currentnode | |
199 |
|
199 | |||
200 | p1 = raw[(start + 20):(start + 40)] |
|
200 | p1 = raw[(start + 20):(start + 40)] | |
201 | p2 = raw[(start + 40):(start + 60)] |
|
201 | p2 = raw[(start + 40):(start + 60)] | |
202 | linknode = raw[(start + 60):(start + 80)] |
|
202 | linknode = raw[(start + 60):(start + 80)] | |
203 | copyfrom = raw[(start + 80):divider] |
|
203 | copyfrom = raw[(start + 80):divider] | |
204 |
|
204 | |||
205 | mapping[currentnode] = (p1, p2, linknode, copyfrom) |
|
205 | mapping[currentnode] = (p1, p2, linknode, copyfrom) | |
206 | start = divider + 1 |
|
206 | start = divider + 1 | |
207 |
|
207 | |||
208 | return size, firstnode, mapping |
|
208 | return size, firstnode, mapping | |
209 |
|
209 | |||
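# The ancestor section decoded above is a run of variable-length records:
#   20-byte node | 20-byte p1 | 20-byte p2 | 20-byte linknode | copyfrom | \0
# A sketch of the inverse, building one record from 20-byte binary ids
# (the helper name is hypothetical):
def _packancestorentry(node, p1, p2, linknode, copyfrom=''):
    assert all(len(part) == 20 for part in (node, p1, p2, linknode))
    return node + p1 + p2 + linknode + copyfrom + '\0'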
210 | def debugdatapack(ui, *paths, **opts): |
|
210 | def debugdatapack(ui, *paths, **opts): | |
211 | for path in paths: |
|
211 | for path in paths: | |
212 | if '.data' in path: |
|
212 | if '.data' in path: | |
213 | path = path[:path.index('.data')] |
|
213 | path = path[:path.index('.data')] | |
214 | ui.write("%s:\n" % path) |
|
214 | ui.write("%s:\n" % path) | |
215 | dpack = datapack.datapack(path) |
|
215 | dpack = datapack.datapack(path) | |
216 | node = opts.get('node') |
|
216 | node = opts.get(r'node') | |
217 | if node: |
|
217 | if node: | |
218 | deltachain = dpack.getdeltachain('', bin(node)) |
|
218 | deltachain = dpack.getdeltachain('', bin(node)) | |
219 | dumpdeltachain(ui, deltachain, **opts) |
|
219 | dumpdeltachain(ui, deltachain, **opts) | |
220 | return |
|
220 | return | |
221 |
|
221 | |||
222 | if opts.get('long'): |
|
222 | if opts.get(r'long'): | |
223 | hashformatter = hex |
|
223 | hashformatter = hex | |
224 | hashlen = 42 |
|
224 | hashlen = 42 | |
225 | else: |
|
225 | else: | |
226 | hashformatter = short |
|
226 | hashformatter = short | |
227 | hashlen = 14 |
|
227 | hashlen = 14 | |
228 |
|
228 | |||
229 | lastfilename = None |
|
229 | lastfilename = None | |
230 | totaldeltasize = 0 |
|
230 | totaldeltasize = 0 | |
231 | totalblobsize = 0 |
|
231 | totalblobsize = 0 | |
232 | def printtotals(): |
|
232 | def printtotals(): | |
233 | if lastfilename is not None: |
|
233 | if lastfilename is not None: | |
234 | ui.write("\n") |
|
234 | ui.write("\n") | |
235 | if not totaldeltasize or not totalblobsize: |
|
235 | if not totaldeltasize or not totalblobsize: | |
236 | return |
|
236 | return | |
237 | difference = totalblobsize - totaldeltasize |
|
237 | difference = totalblobsize - totaldeltasize | |
238 | deltastr = "%0.1f%% %s" % ( |
|
238 | deltastr = "%0.1f%% %s" % ( | |
239 | (100.0 * abs(difference) / totalblobsize), |
|
239 | (100.0 * abs(difference) / totalblobsize), | |
240 | ("smaller" if difference > 0 else "bigger")) |
|
240 | ("smaller" if difference > 0 else "bigger")) | |
241 |
|
241 | |||
242 | ui.write(("Total:%s%s %s (%s)\n") % ( |
|
242 | ui.write(("Total:%s%s %s (%s)\n") % ( | |
243 | "".ljust(2 * hashlen - len("Total:")), |
|
243 | "".ljust(2 * hashlen - len("Total:")), | |
244 | str(totaldeltasize).ljust(12), |
|
244 | str(totaldeltasize).ljust(12), | |
245 | str(totalblobsize).ljust(9), |
|
245 | str(totalblobsize).ljust(9), | |
246 | deltastr |
|
246 | deltastr | |
247 | )) |
|
247 | )) | |
248 |
|
248 | |||
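# For instance, totaldeltasize=150 and totalblobsize=600 print as
# "75.0% smaller", since the deltas take a quarter of the full-text bytes.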
249 | bases = {} |
|
249 | bases = {} | |
250 | nodes = set() |
|
250 | nodes = set() | |
251 | failures = 0 |
|
251 | failures = 0 | |
252 | for filename, node, deltabase, deltalen in dpack.iterentries(): |
|
252 | for filename, node, deltabase, deltalen in dpack.iterentries(): | |
253 | bases[node] = deltabase |
|
253 | bases[node] = deltabase | |
254 | if node in nodes: |
|
254 | if node in nodes: | |
255 | ui.write(("Bad entry: %s appears twice\n" % short(node))) |
|
255 | ui.write(("Bad entry: %s appears twice\n" % short(node))) | |
256 | failures += 1 |
|
256 | failures += 1 | |
257 | nodes.add(node) |
|
257 | nodes.add(node) | |
258 | if filename != lastfilename: |
|
258 | if filename != lastfilename: | |
259 | printtotals() |
|
259 | printtotals() | |
260 | name = '(empty name)' if filename == '' else filename |
|
260 | name = '(empty name)' if filename == '' else filename | |
261 | ui.write("%s:\n" % name) |
|
261 | ui.write("%s:\n" % name) | |
262 | ui.write("%s%s%s%s\n" % ( |
|
262 | ui.write("%s%s%s%s\n" % ( | |
263 | "Node".ljust(hashlen), |
|
263 | "Node".ljust(hashlen), | |
264 | "Delta Base".ljust(hashlen), |
|
264 | "Delta Base".ljust(hashlen), | |
265 | "Delta Length".ljust(14), |
|
265 | "Delta Length".ljust(14), | |
266 | "Blob Size".ljust(9))) |
|
266 | "Blob Size".ljust(9))) | |
267 | lastfilename = filename |
|
267 | lastfilename = filename | |
268 | totalblobsize = 0 |
|
268 | totalblobsize = 0 | |
269 | totaldeltasize = 0 |
|
269 | totaldeltasize = 0 | |
270 |
|
270 | |||
271 | # Metadata could be missing, in which case it will be an empty dict. |
|
271 | # Metadata could be missing, in which case it will be an empty dict. | |
272 | meta = dpack.getmeta(filename, node) |
|
272 | meta = dpack.getmeta(filename, node) | |
273 | if constants.METAKEYSIZE in meta: |
|
273 | if constants.METAKEYSIZE in meta: | |
274 | blobsize = meta[constants.METAKEYSIZE] |
|
274 | blobsize = meta[constants.METAKEYSIZE] | |
275 | totaldeltasize += deltalen |
|
275 | totaldeltasize += deltalen | |
276 | totalblobsize += blobsize |
|
276 | totalblobsize += blobsize | |
277 | else: |
|
277 | else: | |
278 | blobsize = "(missing)" |
|
278 | blobsize = "(missing)" | |
279 | ui.write("%s %s %s%s\n" % ( |
|
279 | ui.write("%s %s %s%s\n" % ( | |
280 | hashformatter(node), |
|
280 | hashformatter(node), | |
281 | hashformatter(deltabase), |
|
281 | hashformatter(deltabase), | |
282 | str(deltalen).ljust(14), |
|
282 | str(deltalen).ljust(14), | |
283 | blobsize)) |
|
283 | blobsize)) | |
284 |
|
284 | |||
285 | if filename is not None: |
|
285 | if filename is not None: | |
286 | printtotals() |
|
286 | printtotals() | |
287 |
|
287 | |||
288 | failures += _sanitycheck(ui, set(nodes), bases) |
|
288 | failures += _sanitycheck(ui, set(nodes), bases) | |
289 | if failures > 1: |
|
289 | if failures > 1: | |
290 | ui.warn(("%d failures\n" % failures)) |
|
290 | ui.warn(("%d failures\n" % failures)) | |
291 | return 1 |
|
291 | return 1 | |
292 |
|
292 | |||
def _sanitycheck(ui, nodes, bases):
    """
    Does some basic sanity checking on a packfile with ``nodes`` and ``bases``
    (a mapping of node->base):

    - Each deltabase must itself be a node elsewhere in the pack
    - There must be no cycles
    """
    failures = 0
    for node in nodes:
        seen = set()
        current = node
        deltabase = bases[current]

        while deltabase != nullid:
            if deltabase not in nodes:
                ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
                        (short(node), short(deltabase))))
                failures += 1
                break

            if deltabase in seen:
                ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
                        (short(node), short(deltabase))))
                failures += 1
                break

            current = deltabase
            seen.add(current)
            deltabase = bases[current]
        # Since ``node`` begins a valid chain, reset/memoize its base to nullid
        # so we don't traverse it again.
        bases[node] = nullid
    return failures

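To make the check above easy to try outside Mercurial, here is a self-contained sketch of the same walk over a plain node->base dict. The string ids and the "0" sentinel (standing in for nullid) are illustrative only:

# Standalone sketch of the same delta-chain check: follow each node's
# chain of bases, flagging unknown bases and cycles along the way.
NULL = "0"  # stand-in for nullid

def sanitycheck(bases):
    failures = []
    for node in list(bases):
        seen = set()
        base = bases[node]
        while base != NULL:
            if base not in bases:
                failures.append("%s: unknown deltabase %s" % (node, base))
                break
            if base in seen:
                failures.append("%s: cycle at %s" % (node, base))
                break
            seen.add(base)
            base = bases[base]
        else:
            # The chain reached NULL, so memoize it as known-valid.
            bases[node] = NULL
    return failures

# "a" is a full text; "b" and "c" delta off it; "x" and "y" form a cycle.
print(sanitycheck({"a": NULL, "b": "a", "c": "b", "x": "y", "y": "x"}))
# -> ['x: cycle at y', 'y: cycle at x']
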
def dumpdeltachain(ui, deltachain, **opts):
    hashformatter = hex
    hashlen = 40

    lastfilename = None
    for filename, node, deltabasename, deltabasenode, delta in deltachain:
        if filename != lastfilename:
            ui.write("\n%s\n" % filename)
            lastfilename = filename
        ui.write("%s %s %s %s\n" % (
            "Node".ljust(hashlen),
            "Delta Base".ljust(hashlen),
            "Delta SHA1".ljust(hashlen),
            "Delta Length".ljust(6),
        ))

        ui.write("%s %s %s %s\n" % (
            hashformatter(node),
            hashformatter(deltabasenode),
            hashlib.sha1(delta).hexdigest(),
            len(delta)))

def debughistorypack(ui, path):
    if '.hist' in path:
        path = path[:path.index('.hist')]
    hpack = historypack.historypack(path)

    lastfilename = None
    for entry in hpack.iterentries():
        filename, node, p1node, p2node, linknode, copyfrom = entry
        if filename != lastfilename:
            ui.write("\n%s\n" % filename)
            ui.write("%s%s%s%s%s\n" % (
                "Node".ljust(14),
                "P1 Node".ljust(14),
                "P2 Node".ljust(14),
                "Link Node".ljust(14),
                "Copy From"))
            lastfilename = filename
        ui.write("%s %s %s %s %s\n" % (short(node), short(p1node),
                                       short(p2node), short(linknode),
                                       copyfrom))

def debugwaitonrepack(repo):
    with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''):
        return

def debugwaitonprefetch(repo):
    with repo._lock(repo.svfs, "prefetchlock", True, None,
                    None, _('prefetching in %s') % repo.origroot):
        pass

@@ -1,587 +1,588 @@
# fileserverclient.py - client for communicating with the cache process
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib
import io
import os
import threading
import time
import zlib

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
    error,
+    pycompat,
    revlog,
    sshpeer,
    util,
    wireprotov1peer,
)
from mercurial.utils import procutil

from . import (
    constants,
    contentstore,
    metadatastore,
)

_sshv1peer = sshpeer.sshv1peer

# Statistics for debugging
fetchcost = 0
fetches = 0
fetched = 0
fetchmisses = 0

_lfsmod = None
_downloading = _('downloading')

def getcachekey(reponame, file, id):
    pathhash = hashlib.sha1(file).hexdigest()
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)

def getlocalkey(file, id):
    pathhash = hashlib.sha1(file).hexdigest()
    return os.path.join(pathhash, id)

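These two helpers fix the cache layout: shared-cache keys nest as <reponame>/<2 hex>/<38 hex>/<id>, while local keys drop the repo prefix and the split. A quick illustration with made-up inputs:

import hashlib
import os

# Made-up inputs: a repo name, a file path (bytes), and a 40-hex node id.
reponame, file, id = "myrepo", b"dir/file.py", "a" * 40
pathhash = hashlib.sha1(file).hexdigest()

# Shared cache key: <reponame>/<first 2 hex digits>/<remaining 38>/<id>
print(os.path.join(reponame, pathhash[:2], pathhash[2:], id))
# Local key: <full 40-hex path hash>/<id>
print(os.path.join(pathhash, id))

Splitting the first two hex digits into their own directory level presumably keeps any single cache directory from accumulating an unbounded number of entries.
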
def peersetup(ui, peer):

    class remotefilepeer(peer.__class__):
        @wireprotov1peer.batchable
        def x_rfl_getfile(self, file, node):
            if not self.capable('x_rfl_getfile'):
                raise error.Abort(
                    'configured remotefile server does not support getfile')
            f = wireprotov1peer.future()
            yield {'file': file, 'node': node}, f
            code, data = f.value.split('\0', 1)
            if int(code):
                raise error.LookupError(file, node, data)
            yield data

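Methods decorated with @wireprotov1peer.batchable, like the one above, are two-phase generators: the first yield hands the batcher the request arguments plus an empty future, and once the batcher has filled f.value with the raw response, the generator resumes and yields the parsed result. The control flow can be sketched without any Mercurial plumbing (the Future class and driver below are toy stand-ins, not the real wireprotov1peer API):

class Future(object):
    value = None  # the batcher fills this in between the two yields

def getfile(file, node):
    f = Future()
    yield {'file': file, 'node': node}, f   # phase 1: emit the request
    code, data = f.value.split('\0', 1)     # phase 2: parse the reply
    if int(code):
        raise LookupError((file, node, data))
    yield data

gen = getfile('foo.txt', 'a' * 40)
args, fut = next(gen)             # the batcher collects the request...
fut.value = '0\0file contents'    # ...sends it and stores the raw reply...
print(next(gen))                  # ...then resumes: prints 'file contents'
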
        @wireprotov1peer.batchable
        def x_rfl_getflogheads(self, path):
            if not self.capable('x_rfl_getflogheads'):
                raise error.Abort('configured remotefile server does not '
                                  'support getflogheads')
            f = wireprotov1peer.future()
            yield {'path': path}, f
            heads = f.value.split('\n') if f.value else []
            yield heads

        def _updatecallstreamopts(self, command, opts):
            if command != 'getbundle':
                return
            if (constants.NETWORK_CAP_LEGACY_SSH_GETFILES
                not in self.capabilities()):
                return
            if not util.safehasattr(self, '_localrepo'):
                return
            if (constants.SHALLOWREPO_REQUIREMENT
                not in self._localrepo.requirements):
                return

            bundlecaps = opts.get('bundlecaps')
            if bundlecaps:
                bundlecaps = [bundlecaps]
            else:
                bundlecaps = []

            # shallow, includepattern, and excludepattern are a hacky way of
            # carrying over data from the local repo to this getbundle
            # command. We need to do it this way because bundle1 getbundle
            # doesn't provide any other place we can hook in to manipulate
            # getbundle args before it goes across the wire. Once we get rid
            # of bundle1, we can use bundle2's _pullbundle2extraprepare to
            # do this more cleanly.
            bundlecaps.append(constants.BUNDLE2_CAPABLITY)
            if self._localrepo.includepattern:
                patterns = '\0'.join(self._localrepo.includepattern)
                includecap = "includepattern=" + patterns
                bundlecaps.append(includecap)
            if self._localrepo.excludepattern:
                patterns = '\0'.join(self._localrepo.excludepattern)
                excludecap = "excludepattern=" + patterns
                bundlecaps.append(excludecap)
            opts['bundlecaps'] = ','.join(bundlecaps)

        def _sendrequest(self, command, args, **opts):
            self._updatecallstreamopts(command, args)
            return super(remotefilepeer, self)._sendrequest(command, args,
                                                            **opts)

        def _callstream(self, command, **opts):
            supertype = super(remotefilepeer, self)
            if not util.safehasattr(supertype, '_sendrequest'):
-                self._updatecallstreamopts(command, opts)
+                self._updatecallstreamopts(command, pycompat.byteskwargs(opts))
            return super(remotefilepeer, self)._callstream(command, **opts)

    peer.__class__ = remotefilepeer

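The pycompat.byteskwargs(opts) call added in _callstream is the usual Python 3 shim: **opts arrives with native str keys, while this codebase indexes options with bytes. Roughly, the helper re-keys the dict; this is a simplified sketch, not the real implementation in mercurial/pycompat.py:

# Simplified sketch: re-key a **kwargs dict from native str to bytes so
# downstream code can keep looking options up with bytes keys.
def byteskwargs(dic):
    return dict((k.encode('latin-1'), v) for k, v in dic.items())

opts = {'bundlecaps': b'caps'}          # as received via **opts on Python 3
bopts = byteskwargs(opts)
assert bopts[b'bundlecaps'] == b'caps'  # bytes-keyed lookup now works
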
class cacheconnection(object):
    """The connection for communicating with the remote cache. Performs
    gets and sets by communicating with an external process that has the
    cache-specific implementation.
    """
    def __init__(self):
        self.pipeo = self.pipei = self.pipee = None
        self.subprocess = None
        self.connected = False

    def connect(self, cachecommand):
        if self.pipeo:
            raise error.Abort(_("cache connection already open"))
        self.pipei, self.pipeo, self.pipee, self.subprocess = \
            procutil.popen4(cachecommand)
        self.connected = True

    def close(self):
        def tryclose(pipe):
            try:
                pipe.close()
            except Exception:
                pass
        if self.connected:
            try:
                self.pipei.write("exit\n")
            except Exception:
                pass
            tryclose(self.pipei)
            self.pipei = None
            tryclose(self.pipeo)
            self.pipeo = None
            tryclose(self.pipee)
            self.pipee = None
            try:
                # Wait for process to terminate, making sure to avoid deadlock.
                # See https://docs.python.org/2/library/subprocess.html for
                # warnings about wait() and deadlocking.
                self.subprocess.communicate()
            except Exception:
                pass
            self.subprocess = None
            self.connected = False

    def request(self, request, flush=True):
        if self.connected:
            try:
                self.pipei.write(request)
                if flush:
                    self.pipei.flush()
            except IOError:
                self.close()

    def receiveline(self):
        if not self.connected:
            return None
        try:
            result = self.pipeo.readline()[:-1]
            if not result:
                self.close()
        except IOError:
            self.close()

        return result

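The protocol cacheconnection speaks over the child's stdin/stdout is line oriented: a get request is "get\n<count>\n" followed by one key per line, and the process replies with one line per missing key (optionally "_hits_<n>_..." progress lines for hits) terminated by a "0" line; a set request names keys the client just filled in, and "exit" ends the session. For reference, a minimal stand-alone cache process that reports every key as a miss (the same behaviour as the simplecache fake further down) could look like this hypothetical script:

#!/usr/bin/env python
# Minimal cacheprocess stand-in: answers every "get" with all-misses and
# ignores "set"; a real process would consult and populate actual storage.
import sys

def main():
    while True:
        command = sys.stdin.readline()[:-1]
        if not command or command == "exit":
            break
        count = int(sys.stdin.readline())
        keys = [sys.stdin.readline()[:-1] for _ in range(count)]
        if command == "get":
            for key in keys:
                sys.stdout.write(key + "\n")  # report every key as a miss
            sys.stdout.write("0\n")           # end-of-response marker
            sys.stdout.flush()
        # "set" names keys the client just fetched; nothing to store here.

if __name__ == '__main__':
    main()
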
def _getfilesbatch(
        remote, receivemissing, progresstick, missed, idmap, batchsize):
    # Over http(s), iterbatch is a streamy method and we can start
    # looking at results early. This means we send one (potentially
    # large) request, but then we show nice progress as we process
    # file results, rather than showing chunks of $batchsize in
    # progress.
    #
    # Over ssh, iterbatch isn't streamy because batch() wasn't
    # explicitly designed as a streaming method. In the future we
    # should probably introduce a streambatch() method upstream and
    # use that for this.
    with remote.commandexecutor() as e:
        futures = []
        for m in missed:
            futures.append(e.callcommand('x_rfl_getfile', {
                'file': idmap[m],
                'node': m[-40:]
            }))

        for i, m in enumerate(missed):
            r = futures[i].result()
            futures[i] = None  # release memory
            file_ = idmap[m]
            node = m[-40:]
            receivemissing(io.BytesIO('%d\n%s' % (len(r), r)), file_, node)
            progresstick()

def _getfiles_optimistic(
        remote, receivemissing, progresstick, missed, idmap, step):
    remote._callstream("x_rfl_getfiles")
    i = 0
    pipeo = remote._pipeo
    pipei = remote._pipei
    while i < len(missed):
        # issue a batch of requests
        start = i
        end = min(len(missed), start + step)
        i = end
        for missingid in missed[start:end]:
            # issue new request
            versionid = missingid[-40:]
            file = idmap[missingid]
            sshrequest = "%s%s\n" % (versionid, file)
            pipeo.write(sshrequest)
        pipeo.flush()

        # receive batch results
        for missingid in missed[start:end]:
            versionid = missingid[-40:]
            file = idmap[missingid]
            receivemissing(pipei, file, versionid)
            progresstick()

    # End the command
    pipeo.write('\n')
    pipeo.flush()

def _getfiles_threaded(
        remote, receivemissing, progresstick, missed, idmap, step):
    remote._callstream("getfiles")
    pipeo = remote._pipeo
    pipei = remote._pipei

    def writer():
        for missingid in missed:
            versionid = missingid[-40:]
            file = idmap[missingid]
            sshrequest = "%s%s\n" % (versionid, file)
            pipeo.write(sshrequest)
        pipeo.flush()
    writerthread = threading.Thread(target=writer)
    writerthread.daemon = True
    writerthread.start()

    for missingid in missed:
        versionid = missingid[-40:]
        file = idmap[missingid]
        receivemissing(pipei, file, versionid)
        progresstick()

    writerthread.join()
    # End the command
    pipeo.write('\n')
    pipeo.flush()

class fileserverclient(object):
    """A client for requesting files from the remote file server.
    """
    def __init__(self, repo):
        ui = repo.ui
        self.repo = repo
        self.ui = ui
        self.cacheprocess = ui.config("remotefilelog", "cacheprocess")
        if self.cacheprocess:
            self.cacheprocess = util.expandpath(self.cacheprocess)

        # This option causes remotefilelog to pass the full file path to the
        # cacheprocess instead of a hashed key.
        self.cacheprocesspasspath = ui.configbool(
            "remotefilelog", "cacheprocess.includepath")

        self.debugoutput = ui.configbool("remotefilelog", "debug")

        self.remotecache = cacheconnection()

    def setstore(self, datastore, historystore, writedata, writehistory):
        self.datastore = datastore
        self.historystore = historystore
        self.writedata = writedata
        self.writehistory = writehistory

    def _connect(self):
        return self.repo.connectionpool.get(self.repo.fallbackpath)

    def request(self, fileids):
        """Takes a list of filename/node pairs and fetches them from the
        server. Files are stored in the local cache.
        A list of nodes that the server couldn't find is returned.
        If the connection fails, an exception is raised.
        """
        if not self.remotecache.connected:
            self.connect()
        cache = self.remotecache
        writedata = self.writedata

        repo = self.repo
        count = len(fileids)
        request = "get\n%d\n" % count
        idmap = {}
        reponame = repo.name
        for file, id in fileids:
            fullid = getcachekey(reponame, file, id)
            if self.cacheprocesspasspath:
                request += file + '\0'
            request += fullid + "\n"
            idmap[fullid] = file

        cache.request(request)

        total = count
        self.ui.progress(_downloading, 0, total=count)

        missed = []
        count = 0
        while True:
            missingid = cache.receiveline()
            if not missingid:
                missedset = set(missed)
                for missingid in idmap.iterkeys():
                    if missingid not in missedset:
                        missed.append(missingid)
                self.ui.warn(_("warning: cache connection closed early - " +
                               "falling back to server\n"))
                break
            if missingid == "0":
                break
            if missingid.startswith("_hits_"):
                # receive progress reports
                parts = missingid.split("_")
                count += int(parts[2])
                self.ui.progress(_downloading, count, total=total)
                continue

            missed.append(missingid)

        global fetchmisses
        fetchmisses += len(missed)

        count = [total - len(missed)]
        fromcache = count[0]
        self.ui.progress(_downloading, count[0], total=total)
        self.ui.log("remotefilelog", "remote cache hit rate is %r of %r\n",
                    count[0], total, hit=count[0], total=total)

        oldumask = os.umask(0o002)
        try:
            # receive cache misses from master
            if missed:
                def progresstick():
                    count[0] += 1
                    self.ui.progress(_downloading, count[0], total=total)
                # When verbose is true, sshpeer prints 'running ssh...'
                # to stdout, which can interfere with some command
                # outputs
                verbose = self.ui.verbose
                self.ui.verbose = False
                try:
                    with self._connect() as conn:
                        remote = conn.peer
                        if remote.capable(
                            constants.NETWORK_CAP_LEGACY_SSH_GETFILES):
                            if not isinstance(remote, _sshv1peer):
                                raise error.Abort('remotefilelog requires ssh '
                                                  'servers')
                            step = self.ui.configint('remotefilelog',
                                                     'getfilesstep')
                            getfilestype = self.ui.config('remotefilelog',
                                                          'getfilestype')
                            if getfilestype == 'threaded':
                                _getfiles = _getfiles_threaded
                            else:
                                _getfiles = _getfiles_optimistic
                            _getfiles(remote, self.receivemissing, progresstick,
                                      missed, idmap, step)
                        elif remote.capable("x_rfl_getfile"):
                            if remote.capable('batch'):
                                batchdefault = 100
                            else:
                                batchdefault = 10
                            batchsize = self.ui.configint(
                                'remotefilelog', 'batchsize', batchdefault)
                            _getfilesbatch(
                                remote, self.receivemissing, progresstick,
                                missed, idmap, batchsize)
                        else:
                            raise error.Abort("configured remotefilelog server"
                                              " does not support remotefilelog")

                    self.ui.log("remotefilefetchlog",
                                "Success\n",
                                fetched_files = count[0] - fromcache,
                                total_to_fetch = total - fromcache)
                except Exception:
                    self.ui.log("remotefilefetchlog",
                                "Fail\n",
                                fetched_files = count[0] - fromcache,
                                total_to_fetch = total - fromcache)
                    raise
                finally:
                    self.ui.verbose = verbose
                # send to memcache
                count[0] = len(missed)
                request = "set\n%d\n%s\n" % (count[0], "\n".join(missed))
                cache.request(request)

            self.ui.progress(_downloading, None)

            # mark ourselves as a user of this cache
            writedata.markrepo(self.repo.path)
        finally:
            os.umask(oldumask)

    def receivemissing(self, pipe, filename, node):
        line = pipe.readline()[:-1]
        if not line:
            raise error.ResponseError(_("error downloading file contents:"),
                                      _("connection closed early"))
        size = int(line)
        data = pipe.read(size)
        if len(data) != size:
            raise error.ResponseError(_("error downloading file contents:"),
                                      _("only received %s of %s bytes")
                                      % (len(data), size))

        self.writedata.addremotefilelognode(filename, bin(node),
                                            zlib.decompress(data))

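For reference, each file body on this wire is framed as an ASCII decimal size, a newline, and then exactly that many bytes of zlib-compressed content, which is what receivemissing parses above. A toy round-trip of the framing (made-up payload, no Mercurial involved):

import io
import zlib

payload = zlib.compress(b"file contents as served")
frame = b"%d\n%s" % (len(payload), payload)  # "<size>\n<compressed bytes>"

pipe = io.BytesIO(frame)
size = int(pipe.readline()[:-1])   # parse the size header
data = pipe.read(size)             # then read exactly that many bytes
assert len(data) == size
assert zlib.decompress(data) == b"file contents as served"
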
    def connect(self):
        if self.cacheprocess:
            cmd = "%s %s" % (self.cacheprocess, self.writedata._path)
            self.remotecache.connect(cmd)
        else:
            # If no cache process is specified, we fake one that always
            # returns cache misses. This enables tests to run easily
            # and may eventually allow us to be a drop-in replacement
            # for the largefiles extension.
            class simplecache(object):
                def __init__(self):
                    self.missingids = []
                    self.connected = True

                def close(self):
                    pass

                def request(self, value, flush=True):
                    lines = value.split("\n")
                    if lines[0] != "get":
                        return
                    self.missingids = lines[2:-1]
                    self.missingids.append('0')

                def receiveline(self):
                    if len(self.missingids) > 0:
                        return self.missingids.pop(0)
                    return None

            self.remotecache = simplecache()

    def close(self):
        if fetches:
            msg = ("%s files fetched over %d fetches - " +
                   "(%d misses, %0.2f%% hit ratio) over %0.2fs\n") % (
                       fetched,
                       fetches,
                       fetchmisses,
                       float(fetched - fetchmisses) / float(fetched) * 100.0,
                       fetchcost)
            if self.debugoutput:
                self.ui.warn(msg)
            self.ui.log("remotefilelog.prefetch", msg.replace("%", "%%"),
                        remotefilelogfetched=fetched,
                        remotefilelogfetches=fetches,
                        remotefilelogfetchmisses=fetchmisses,
                        remotefilelogfetchtime=fetchcost * 1000)

        if self.remotecache.connected:
            self.remotecache.close()

    def prefetch(self, fileids, force=False, fetchdata=True,
                 fetchhistory=False):
        """downloads the given file versions to the cache
        """
        repo = self.repo
        idstocheck = []
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if (file == '.hgtags' or len(id) == 42
                or not repo.shallowmatch(file)):
                continue

            idstocheck.append((file, bin(id)))

        datastore = self.datastore
        historystore = self.historystore
        if force:
            datastore = contentstore.unioncontentstore(*repo.shareddatastores)
            historystore = metadatastore.unionmetadatastore(
                *repo.sharedhistorystores)

        missingids = set()
        if fetchdata:
            missingids.update(datastore.getmissing(idstocheck))
        if fetchhistory:
            missingids.update(historystore.getmissing(idstocheck))

        # partition missing nodes into nullid and not-nullid so we can
        # warn about this filtering potentially shadowing bugs.
        nullids = len([None for unused, id in missingids if id == nullid])
        if nullids:
            missingids = [(f, id) for f, id in missingids if id != nullid]
            repo.ui.develwarn(
                ('remotefilelog not fetching %d null revs'
                 ' - this is likely hiding bugs' % nullids),
                config='remotefilelog-ext')
        if missingids:
            global fetches, fetched, fetchcost
            fetches += 1

            # We want to be able to detect excess individual file downloads, so
            # let's log that information for debugging.
            if fetches >= 15 and fetches < 18:
                if fetches == 15:
                    fetchwarning = self.ui.config('remotefilelog',
                                                  'fetchwarning')
                    if fetchwarning:
                        self.ui.warn(fetchwarning + '\n')
                self.logstacktrace()
            missingids = [(file, hex(id)) for file, id in missingids]
            fetched += len(missingids)
            start = time.time()
            missingids = self.request(missingids)
            if missingids:
                raise error.Abort(_("unable to download %d files") %
                                  len(missingids))
            fetchcost += time.time() - start
            self._lfsprefetch(fileids)

    def _lfsprefetch(self, fileids):
        if not _lfsmod or not util.safehasattr(
                self.repo.svfs, 'lfslocalblobstore'):
            return
        if not _lfsmod.wrapper.candownload(self.repo):
            return
        pointers = []
        store = self.repo.svfs.lfslocalblobstore
        for file, id in fileids:
            node = bin(id)
            rlog = self.repo.file(file)
            if rlog.flags(node) & revlog.REVIDX_EXTSTORED:
                text = rlog.revision(node, raw=True)
                p = _lfsmod.pointer.deserialize(text)
                oid = p.oid()
                if not store.has(oid):
                    pointers.append(p)
        if len(pointers) > 0:
            self.repo.svfs.lfsremoteblobstore.readbatch(pointers, store)
            assert all(store.has(p.oid()) for p in pointers)

    def logstacktrace(self):
        import traceback
        self.ui.log('remotefilelog', 'excess remotefilelog fetching:\n%s\n',
                    ''.join(traceback.format_stack()))

@@ -1,156 +1,156 @@
from __future__ import absolute_import

from mercurial.node import hex, nullid
from . import (
    basestore,
    shallowutil,
)

class unionmetadatastore(basestore.baseunionstore):
    def __init__(self, *args, **kwargs):
        super(unionmetadatastore, self).__init__(*args, **kwargs)

        self.stores = args
-        self.writestore = kwargs.get('writestore')
+        self.writestore = kwargs.get(r'writestore')

        # If allowincomplete==True then the union store can return partial
        # ancestor lists, otherwise it will throw a KeyError if a full
        # history can't be found.
-        self.allowincomplete = kwargs.get('allowincomplete', False)
+        self.allowincomplete = kwargs.get(r'allowincomplete', False)

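The two r'' prefixes in this hunk are Python 3 porting changes: Mercurial's py3 source loader rewrites unprefixed string literals to bytes, but **kwargs keys are always native str, so spelling the key as a raw literal keeps it a native string on both versions. A standalone illustration of the mismatch being avoided (plain Python, no Mercurial loader involved):

def makestore(**kwargs):
    # Keyword-argument names always arrive as native str.
    return kwargs

opts = makestore(writestore="w", allowincomplete=True)
assert opts.get('writestore') == "w"     # native-str key: found
assert opts.get(b'writestore') is None   # bytes key on Python 3: not found
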
    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
            node: (p1, p2, linknode, copyfrom),
            ...
        }
        """
        if known is None:
            known = set()
        if node in known:
            return []

        ancestors = {}
        def traverse(curname, curnode):
            # TODO: this algorithm has the potential to traverse parts of
            # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C
            # may be queued as missing, then B and A are traversed for both.
            queue = [(curname, curnode)]
            missing = []
            seen = set()
            while queue:
                name, node = queue.pop()
                if (name, node) in seen:
                    continue
                seen.add((name, node))
                value = ancestors.get(node)
                if not value:
                    missing.append((name, node))
                    continue
                p1, p2, linknode, copyfrom = value
                if p1 != nullid and p1 not in known:
                    queue.append((copyfrom or curname, p1))
                if p2 != nullid and p2 not in known:
                    queue.append((curname, p2))
            return missing

        missing = [(name, node)]
        while missing:
            curname, curnode = missing.pop()
            try:
                ancestors.update(self._getpartialancestors(curname, curnode,
                                                           known=known))
                newmissing = traverse(curname, curnode)
                missing.extend(newmissing)
            except KeyError:
                # If we allow incomplete histories, don't throw.
                if not self.allowincomplete:
                    raise
                # If the requested name+node doesn't exist, always throw.
                if (curname, curnode) == (name, node):
                    raise

        # TODO: ancestors should probably be (name, node) -> (value)
        return ancestors

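The shape documented above is just a parent map keyed by node. A toy version (short strings instead of 20-byte binary nodes, None standing in for nullid) shows how a consumer might walk first parents out of the returned dict:

NULL = None  # stand-in for nullid
# node -> (p1, p2, linknode, copyfrom), as documented above
ancestors = {
    "c": ("b", NULL, "commit3", None),
    "b": ("a", NULL, "commit2", "old-name"),  # this revision was a copy
    "a": (NULL, NULL, "commit1", None),
}

node, chain = "c", []
while node is not NULL:
    p1, p2, linknode, copyfrom = ancestors[node]
    chain.append((node, linknode, copyfrom))
    node = p1
print(chain)
# [('c', 'commit3', None), ('b', 'commit2', 'old-name'), ('a', 'commit1', None)]
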
77 | @basestore.baseunionstore.retriable |
|
77 | @basestore.baseunionstore.retriable | |
78 | def _getpartialancestors(self, name, node, known=None): |
|
78 | def _getpartialancestors(self, name, node, known=None): | |
79 | for store in self.stores: |
|
79 | for store in self.stores: | |
80 | try: |
|
80 | try: | |
81 | return store.getancestors(name, node, known=known) |
|
81 | return store.getancestors(name, node, known=known) | |
82 | except KeyError: |
|
82 | except KeyError: | |
83 | pass |
|
83 | pass | |
84 |
|
84 | |||
85 | raise KeyError((name, hex(node))) |
|
85 | raise KeyError((name, hex(node))) | |
86 |
|
86 | |||
87 | @basestore.baseunionstore.retriable |
|
87 | @basestore.baseunionstore.retriable | |
88 | def getnodeinfo(self, name, node): |
|
88 | def getnodeinfo(self, name, node): | |
89 | for store in self.stores: |
|
89 | for store in self.stores: | |
90 | try: |
|
90 | try: | |
91 | return store.getnodeinfo(name, node) |
|
91 | return store.getnodeinfo(name, node) | |
92 | except KeyError: |
|
92 | except KeyError: | |
93 | pass |
|
93 | pass | |
94 |
|
94 | |||
95 | raise KeyError((name, hex(node))) |
|
95 | raise KeyError((name, hex(node))) | |
96 |
|
96 | |||
97 | def add(self, name, node, data): |
|
97 | def add(self, name, node, data): | |
98 | raise RuntimeError("cannot add content only to remotefilelog " |
|
98 | raise RuntimeError("cannot add content only to remotefilelog " | |
99 | "contentstore") |
|
99 | "contentstore") | |
100 |
|
100 | |||
    def getmissing(self, keys):
        missing = keys
        for store in self.stores:
            if missing:
                missing = store.getmissing(missing)
        return missing

    def markledger(self, ledger, options=None):
        for store in self.stores:
            store.markledger(ledger, options)

    def getmetrics(self):
        metrics = [s.getmetrics() for s in self.stores]
        return shallowutil.sumdicts(*metrics)

class remotefilelogmetadatastore(basestore.basestore):
    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
            node: (p1, p2, linknode, copyfrom),
            ...
        }
        """
        data = self._getdata(name, node)
        ancestors = shallowutil.ancestormap(data)
        return ancestors

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, parents, linknode):
        raise RuntimeError("cannot add metadata only to remotefilelog "
                           "metadatastore")

class remotemetadatastore(object):
    def __init__(self, ui, fileservice, shared):
        self._fileservice = fileservice
        self._shared = shared

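    # Force a history-only fetch from the server, then answer from the
    # shared store the fetched blob was written into.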
    def getancestors(self, name, node, known=None):
        self._fileservice.prefetch([(name, hex(node))], force=True,
                                   fetchdata=False, fetchhistory=True)
        return self._shared.getancestors(name, node, known=known)

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, data):
        raise RuntimeError("cannot add to a remote store")

    def getmissing(self, keys):
        return keys

    def markledger(self, ledger, options=None):
        pass
@@ -1,490 +1,491 @@
# remotefilectx.py - filectx/workingfilectx implementations for remotefilelog
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import collections
import time

from mercurial.node import bin, hex, nullid, nullrev
from mercurial import (
    ancestor,
    context,
    error,
    phases,
+    pycompat,
    util,
)
from . import shallowutil

propertycache = util.propertycache
FASTLOG_TIMEOUT_IN_SECS = 0.5

class remotefilectx(context.filectx):
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None, ancestormap=None):
        if fileid == nullrev:
            fileid = nullid
        if fileid and len(fileid) == 40:
            fileid = bin(fileid)
        super(remotefilectx, self).__init__(repo, path, changeid,
                                            fileid, filelog, changectx)
        self._ancestormap = ancestormap

    def size(self):
        return self._filelog.size(self._filenode)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            linknode = self._adjustlinknode(self._path, self._filelog,
                                            self._filenode,
                                            self._descendantrev)
            return self._repo.unfiltered().changelog.rev(linknode)
        else:
            return self.linkrev()

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return remotefilectx(self._repo, self._path, fileid=fileid,
                             filelog=self._filelog, changeid=changeid)

    def linkrev(self):
        return self._linkrev

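    # Resolve the linkrev from the linknode stored in the ancestor map,
    # falling back to a full changelog scan when that node is unknown locally.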
    @propertycache
    def _linkrev(self):
        if self._filenode == nullid:
            return nullrev

        ancestormap = self.ancestormap()
        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        rev = self._repo.changelog.nodemap.get(linknode)
        if rev is not None:
            return rev

        # Search all commits for the appropriate linkrev (slow, but uncommon)
        path = self._path
        fileid = self._filenode
        cl = self._repo.unfiltered().changelog
        mfl = self._repo.manifestlog

        for rev in range(len(cl) - 1, 0, -1):
            node = cl.node(rev)
            data = cl.read(node) # get changeset data (we avoid object creation)
            if path in data[3]: # checking the 'files' field.
                # The file has been touched, check if the hash is what we're
                # looking for.
                if fileid == mfl[data[0]].readfast().get(path):
                    return rev

        # Couldn't find the linkrev. This should generally not happen, and will
        # likely cause a crash.
        return None

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return lkr
        linknode = self._adjustlinknode(self._path, self._filelog,
                                        self._filenode, self.rev(),
                                        inclusive=True)
        return self._repo.changelog.rev(linknode)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """
        ancestormap = self.ancestormap()

        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        if not copyfrom:
            return None

        renamed = (copyfrom, p1)
        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def ancestormap(self):
        if not self._ancestormap:
            self._ancestormap = self.filelog().ancestormap(self._filenode)

        return self._ancestormap

    def parents(self):
        repo = self._repo
        ancestormap = self.ancestormap()

        p1, p2, linknode, copyfrom = ancestormap[self._filenode]
        results = []
        if p1 != nullid:
            path = copyfrom or self._path
            flog = repo.file(path)
            p1ctx = remotefilectx(repo, path, fileid=p1, filelog=flog,
                                  ancestormap=ancestormap)
            p1ctx._descendantrev = self.rev()
            results.append(p1ctx)

        if p2 != nullid:
            path = self._path
            flog = repo.file(path)
            p2ctx = remotefilectx(repo, path, fileid=p2, filelog=flog,
                                  ancestormap=ancestormap)
            p2ctx._descendantrev = self.rev()
            results.append(p2ctx)

        return results

    def _nodefromancrev(self, ancrev, cl, mfl, path, fnode):
        """returns the node for <path> in <ancrev> if content matches <fnode>"""
        ancctx = cl.read(ancrev) # This avoids object creation.
        manifestnode, files = ancctx[0], ancctx[3]
        # If the file was touched in this ancestor, and the content is similar
        # to the one we are searching for.
        if path in files and fnode == mfl[manifestnode].readfast().get(path):
            return cl.node(ancrev)
        return None

    def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked

        Note: This is based on adjustlinkrev in core, but it's quite different.

        adjustlinkrev depends on the fact that the linkrev is the bottommost
        node, and uses that as a stopping point for the ancestor traversal. We
        can't do that here because the linknode is not guaranteed to be the
        bottommost one.

        In our code here, we actually know what a bunch of potential ancestor
        linknodes are, so instead of stopping the cheap-ancestor-traversal when
        we get to a linkrev, we stop when we see any of the known linknodes.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        ancestormap = self.ancestormap()
        linknode = ancestormap[fnode][2]

        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]

        commonlogkwargs = {
-            'revs': ' '.join([hex(cl.node(rev)) for rev in revs]),
+            r'revs': ' '.join([hex(cl.node(rev)) for rev in revs]),
-            'fnode': hex(fnode),
+            r'fnode': hex(fnode),
-            'filepath': path,
+            r'filepath': path,
-            'user': shallowutil.getusername(repo.ui),
+            r'user': shallowutil.getusername(repo.ui),
-            'reponame': shallowutil.getreponame(repo.ui),
+            r'reponame': shallowutil.getreponame(repo.ui),
        }

        if self._verifylinknode(revs, linknode):
            return linknode

        repo.ui.log('linkrevfixup', 'adjusting linknode', **commonlogkwargs)

        pc = repo._phasecache
        seenpublic = False
        iteranc = cl.ancestors(revs, inclusive=inclusive)
        for ancrev in iteranc:
            # First, check locally-available history.
            lnode = self._nodefromancrev(ancrev, cl, mfl, path, fnode)
            if lnode is not None:
                return lnode

            # adjusting linknode can be super-slow. To mitigate the issue
            # we use two heuristics: calling fastlog and forcing remotefilelog
            # prefetch
            if not seenpublic and pc.phase(repo, ancrev) == phases.public:
                # TODO: there used to be a codepath to fetch linknodes
                # from a server as a fast path, but it appeared to
                # depend on an API FB added to their phabricator.
                lnode = self._forceprefetch(repo, path, fnode, revs,
                                            commonlogkwargs)
                if lnode:
                    return lnode
                seenpublic = True

        return linknode

    def _forceprefetch(self, repo, path, fnode, revs,
                       commonlogkwargs):
        # This next part is super non-obvious, so big comment block time!
        #
        # It is possible to get extremely bad performance here when a fairly
        # common set of circumstances occurs when this extension is combined
        # with a server-side commit rewriting extension like pushrebase.
        #
        # First, an engineer creates Commit A and pushes it to the server.
        # While the server's data structure will have the correct linkrev
        # for the files touched in Commit A, the client will have the
        # linkrev of the local commit, which is "invalid" because it's not
        # an ancestor of the main line of development.
        #
        # The client will never download the remotefilelog with the correct
        # linkrev as long as nobody else touches that file, since the file
        # data and history haven't changed since Commit A.
        #
        # After a long time (or a short time in a heavily used repo), if the
        # same engineer returns to change the same file, some commands --
        # such as amends of commits with file moves, logs, diffs, etc --
        # can trigger this _adjustlinknode code. In those cases, finding
        # the correct rev can become quite expensive, as the correct
        # revision is far back in history and we need to walk back through
        # history to find it.
        #
        # In order to improve this situation, we force a prefetch of the
        # remotefilelog data blob for the file we were called on. We do this
        # at most once, when we first see a public commit in the history we
        # are traversing.
        #
        # Forcing the prefetch means we will download the remote blob even
        # if we have the "correct" blob in the local store. Since the union
        # store checks the remote store first, this means we are much more
        # likely to get the correct linkrev at this point.
        #
        # In rare circumstances (such as the server having a suboptimal
        # linkrev for our use case), we will fall back to the old slow path.
        #
        # We may want to add additional heuristics here in the future if
        # the slow path is used too much. One promising possibility is using
        # obsolescence markers to find a more-likely-correct linkrev.

        logmsg = ''
        start = time.time()
        try:
            repo.fileservice.prefetch([(path, hex(fnode))], force=True)

            # Now that we've downloaded a new blob from the server,
            # we need to rebuild the ancestor map to recompute the
            # linknodes.
            self._ancestormap = None
            linknode = self.ancestormap()[fnode][2] # 2 is linknode
            if self._verifylinknode(revs, linknode):
                logmsg = 'remotefilelog prefetching succeeded'
                return linknode
            logmsg = 'remotefilelog prefetching not found'
            return None
        except Exception as e:
            logmsg = 'remotefilelog prefetching failed (%s)' % e
            return None
        finally:
            elapsed = time.time() - start
            repo.ui.log('linkrevfixup', logmsg, elapsed=elapsed * 1000,
-                        **commonlogkwargs)
+                        **pycompat.strkwargs(commonlogkwargs))

    def _verifylinknode(self, revs, linknode):
        """
        Check if a linknode is the correct one for the current history.

        That is, return True if the linkrev is the ancestor of any of the
        passed in revs, otherwise return False.

        `revs` is a list that usually has one element -- usually the wdir parent
        or the user-passed rev we're looking back from. It may contain two revs
        when there is a merge going on, or zero revs when a root node with no
        parents is being created.
        """
        if not revs:
            return False
        try:
            # Use the C fastpath to check if the given linknode is correct.
            cl = self._repo.unfiltered().changelog
            return any(cl.isancestor(linknode, cl.node(r)) for r in revs)
        except error.LookupError:
            # The linknode read from the blob may have been stripped or
            # otherwise not present in the repository anymore. Do not fail hard
            # in this case. Instead, return false and continue the search for
            # the correct linknode.
            return False

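    # Generator over all file ancestors; when followfirst is True only the
    # first parent of each revision is followed.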
    def ancestors(self, followfirst=False):
        ancestors = []
        queue = collections.deque((self,))
        seen = set()
        while queue:
            current = queue.pop()
            if current.filenode() in seen:
                continue
            seen.add(current.filenode())

            ancestors.append(current)

            parents = current.parents()
            first = True
            for p in parents:
                if first or not followfirst:
                    queue.append(p)
                first = False

        # Remove self
        ancestors.pop(0)

        # Sort by linkrev
        # The copy tracing algorithm depends on these coming out in order
        ancestors = sorted(ancestors, reverse=True, key=lambda x: x.linkrev())

        for ancestor in ancestors:
            yield ancestor

    def ancestor(self, fc2, actx):
        # the easy case: no (relevant) renames
        if fc2.path() == self.path() and self.path() in actx:
            return actx[self.path()]

        # the next easiest cases: unambiguous predecessor (name trumps
        # history)
        if self.path() in actx and fc2.path() not in actx:
            return actx[self.path()]
        if fc2.path() in actx and self.path() not in actx:
            return actx[fc2.path()]

        # do a full traversal
        amap = self.ancestormap()
        bmap = fc2.ancestormap()

        def parents(x):
            f, n = x
            p = amap.get(n) or bmap.get(n)
            if not p:
                return []

            return [(p[3] or f, p[0]), (f, p[1])]

        a = (self.path(), self.filenode())
        b = (fc2.path(), fc2.filenode())
        result = ancestor.genericancestor(a, b, parents)
        if result:
            f, n = result
            r = remotefilectx(self._repo, f, fileid=n,
                              ancestormap=amap)
            return r

        return None

    def annotate(self, *args, **kwargs):
        introctx = self
-        prefetchskip = kwargs.pop('prefetchskip', None)
+        prefetchskip = kwargs.pop(r'prefetchskip', None)
        if prefetchskip:
            # use introrev so prefetchskip can be accurately tested
            introrev = self.introrev()
            if self.rev() != introrev:
                introctx = remotefilectx(self._repo, self._path,
                                         changeid=introrev,
                                         fileid=self._filenode,
                                         filelog=self._filelog,
                                         ancestormap=self._ancestormap)

        # like self.ancestors, but append to "fetch" and skip visiting parents
        # of nodes in "prefetchskip".
        fetch = []
        seen = set()
        queue = collections.deque((introctx,))
        seen.add(introctx.node())
        while queue:
            current = queue.pop()
            if current.filenode() != self.filenode():
                # this is a "joint point". fastannotate needs contents of
                # "joint point"s to calculate diffs for side branches.
                fetch.append((current.path(), hex(current.filenode())))
            if prefetchskip and current in prefetchskip:
                continue
            for parent in current.parents():
                if parent.node() not in seen:
                    seen.add(parent.node())
                    queue.append(parent)

        self._repo.ui.debug('remotefilelog: prefetching %d files '
                            'for annotate\n' % len(fetch))
        if fetch:
            self._repo.fileservice.prefetch(fetch)
        return super(remotefilectx, self).annotate(*args, **kwargs)

    # Return an empty list so that hg serve and thg don't stack trace
    def children(self):
        return []

class remoteworkingfilectx(context.workingfilectx, remotefilectx):
    def __init__(self, repo, path, filelog=None, workingctx=None):
        self._ancestormap = None
        return super(remoteworkingfilectx, self).__init__(repo, path,
                                                          filelog, workingctx)

    def parents(self):
        return remotefilectx.parents(self)

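    # The working copy has no stored history, so synthesize an ancestor map
    # from the parents' filelogs, keyed by None for the working file itself.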
    def ancestormap(self):
        if not self._ancestormap:
            path = self._path
            pcl = self._changectx._parents
            renamed = self.renamed()

            if renamed:
                p1 = renamed
            else:
                p1 = (path, pcl[0]._manifest.get(path, nullid))

            p2 = (path, nullid)
            if len(pcl) > 1:
                p2 = (path, pcl[1]._manifest.get(path, nullid))

            m = {}
            if p1[1] != nullid:
                p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
                m.update(p1ctx.filelog().ancestormap(p1[1]))

            if p2[1] != nullid:
                p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
                m.update(p2ctx.filelog().ancestormap(p2[1]))

            copyfrom = ''
            if renamed:
                copyfrom = renamed[0]
            m[None] = (p1[1], p2[1], nullid, copyfrom)
            self._ancestormap = m

        return self._ancestormap
@@ -1,294 +1,294 @@
# shallowbundle.py - bundle10 implementation for use with shallow repositories
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.node import bin, hex, nullid
from mercurial import (
    bundlerepo,
    changegroup,
    error,
    match,
    mdiff,
    pycompat,
)
from . import (
    constants,
    remotefilelog,
    shallowutil,
)

NoFiles = 0
LocalFiles = 1
AllFiles = 2

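# Changegroup emission for remotefilelog: nodes are sorted so parents come
# first, then each node is delta-compressed against its predecessor in the
# sorted list.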
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for c in super(cls, self).group(nodelist, rlog, lookup,
                                        units=units):
            yield c
        return

    if len(nodelist) == 0:
        yield self.close()
        return

    nodelist = shallowutil.sortnodes(nodelist, rlog.parents)

    # add the parent of the first rev
    p = rlog.parents(nodelist[0])[0]
    nodelist.insert(0, p)

    # build deltas
    for i in pycompat.xrange(len(nodelist) - 1):
        prev, curr = nodelist[i], nodelist[i + 1]
        linknode = lookup(curr)
        for c in self.nodechunk(rlog, curr, prev, linknode):
            yield c

    yield self.close()

class shallowcg1packer(changegroup.cgpacker):
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        if shallowutil.isenabled(self._repo):
            fastpathlinkrev = False

        return super(shallowcg1packer, self).generate(commonrevs, clnodes,
                                                      fastpathlinkrev, source)

    def group(self, nodelist, rlog, lookup, units=None, reorder=None):
        return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup,
                            units=units)

    def generatefiles(self, changedfiles, *args):
        try:
            linknodes, commonrevs, source = args
        except ValueError:
            commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
        if shallowutil.isenabled(self._repo):
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it,
                # since bundlerepo is heavily tied to revlogs. Require that
                # the user use unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, 'foo')
                if repo._cgfilespos:
                    raise error.Abort("cannot pull from full bundles",
                                      hint="use `hg unbundle` instead")
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                changedfiles = list([f for f in changedfiles
                                     if not repo.shallowmatch(f)])

        return super(shallowcg1packer, self).generatefiles(
            changedfiles, *args)

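    # Decide how much file content goes into the changegroup: everything for
    # pushes, bundles, and full-repo peers; only locally-created files for
    # bundle2-capable shallow peers; nothing for any other source.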
    def shouldaddfilegroups(self, source):
        repo = self._repo
        if not shallowutil.isenabled(repo):
            return AllFiles

        if source == "push" or source == "bundle":
            return AllFiles

        caps = self._bundlecaps or []
        if source == "serve" or source == "pull":
            if constants.BUNDLE2_CAPABLITY in caps:
                return LocalFiles
            else:
                # Serving to a full repo requires us to serve everything
                repo.ui.warn(_("pulling from a shallow repo\n"))
                return AllFiles

        return NoFiles

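    # remotefilelog filelogs cannot use revlog-based pruning, so keep only
    # the nodes whose introducing changeset lies outside the common set.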
    def prune(self, rlog, missing, commonrevs):
        if not isinstance(rlog, remotefilelog.remotefilelog):
            return super(shallowcg1packer, self).prune(rlog, missing,
                                                       commonrevs)

        repo = self._repo
        results = []
        for fnode in missing:
            fctx = repo.filectx(rlog.filename, fileid=fnode)
            if fctx.linkrev() not in commonrevs:
                results.append(fnode)
        return results

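    # Emit one changegroup chunk for <node>: a delta header followed by the
    # delta against <prevnode> (or the full text when there is no prevnode).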
    def nodechunk(self, revlog, node, prevnode, linknode):
        prefix = ''
        if prevnode == nullid:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            # Actually uses remotefilelog.revdiff which works on nodes, not revs
            delta = revlog.revdiff(prevnode, node)
        p1, p2 = revlog.parents(node)
        flags = revlog.flags(node)
        meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield changegroup.chunkheader(l)
        yield meta
        yield delta

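# Wraps changegroup.makechangegroup: while serving, narrow repo.shallowmatch
# to the include/exclude patterns the client advertised in its bundlecaps.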
141 | def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs): |
|
141 | def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs): | |
142 | if not shallowutil.isenabled(repo): |
|
142 | if not shallowutil.isenabled(repo): | |
143 | return orig(repo, outgoing, version, source, *args, **kwargs) |
|
143 | return orig(repo, outgoing, version, source, *args, **kwargs) | |
144 |
|
144 | |||
145 | original = repo.shallowmatch |
|
145 | original = repo.shallowmatch | |
146 | try: |
|
146 | try: | |
147 | # if serving, only send files the clients has patterns for |
|
147 | # if serving, only send files the clients has patterns for | |
148 | if source == 'serve': |
|
148 | if source == 'serve': | |
149 | bundlecaps = kwargs.get('bundlecaps') |
|
149 | bundlecaps = kwargs.get(r'bundlecaps') | |
150 | includepattern = None |
|
150 | includepattern = None | |
151 | excludepattern = None |
|
151 | excludepattern = None | |
152 | for cap in (bundlecaps or []): |
|
152 | for cap in (bundlecaps or []): | |
153 | if cap.startswith("includepattern="): |
|
153 | if cap.startswith("includepattern="): | |
154 | raw = cap[len("includepattern="):] |
|
154 | raw = cap[len("includepattern="):] | |
155 | if raw: |
|
155 | if raw: | |
156 | includepattern = raw.split('\0') |
|
156 | includepattern = raw.split('\0') | |
157 | elif cap.startswith("excludepattern="): |
|
157 | elif cap.startswith("excludepattern="): | |
158 | raw = cap[len("excludepattern="):] |
|
158 | raw = cap[len("excludepattern="):] | |
159 | if raw: |
|
159 | if raw: | |
160 | excludepattern = raw.split('\0') |
|
160 | excludepattern = raw.split('\0') | |
161 | if includepattern or excludepattern: |
|
161 | if includepattern or excludepattern: | |
162 | repo.shallowmatch = match.match(repo.root, '', None, |
|
162 | repo.shallowmatch = match.match(repo.root, '', None, | |
163 | includepattern, excludepattern) |
|
163 | includepattern, excludepattern) | |
164 | else: |
|
164 | else: | |
            repo.shallowmatch = match.always(repo.root, '')
        return orig(repo, outgoing, version, source, *args, **kwargs)
    finally:
        repo.shallowmatch = original

def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    files = 0
    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, total=expectedfiles)

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_("received file revlog group is empty"))

    processed = set()
    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True
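
    # Illustrative sketch (not part of the original extension): the retry
    # queue that available() drives, in isolation. An item whose dependency
    # has not been laid down yet is pushed back behind that dependency, so
    # everything still lands in topological order:
    #
    #   queue, processed = ['b', 'a'], set()     # 'b' depends on 'a'
    #   deps = {'a': [], 'b': ['a']}
    #   while queue:
    #       item = queue.pop(0)
    #       if item in processed:
    #           continue
    #       missing = [d for d in deps[item] if d not in processed]
    #       if missing:
    #           queue.insert(0, item)            # retry self later...
    #           queue[0:0] = missing             # ...after the dependencies
    #           continue
    #       processed.add(item)                  # processes 'a', then 'b'
    #
    # The skipcount guard below plays the role of a cycle detector: if the
    # whole queue is traversed without laying down a single revision, the
    # remaining dependencies can never be satisfied.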

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase, raw=True)
        text = mdiff.patch(base, delta)
        if isinstance(text, buffer):
            text = str(text)

        meta, text = shallowutil.parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    repo.ui.progress(_('files'), None)

    return len(revisiondatas), newfiles
@@ -1,491 +1,491 @@
# shallowutil.py -- remotefilelog utilities
#
# Copyright 2014 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import collections
import errno
import hashlib
import os
import stat
import struct
import tempfile

from mercurial.i18n import _
from mercurial import (
    error,
    pycompat,
    revlog,
    util,
)
from mercurial.utils import (
    storageutil,
    stringutil,
)
from . import constants

if not pycompat.iswindows:
    import grp

def isenabled(repo):
    """returns whether the repository is remotefilelog enabled or not"""
    return constants.SHALLOWREPO_REQUIREMENT in repo.requirements

def getcachekey(reponame, file, id):
    pathhash = hashlib.sha1(file).hexdigest()
    return os.path.join(reponame, pathhash[:2], pathhash[2:], id)

def getlocalkey(file, id):
    pathhash = hashlib.sha1(file).hexdigest()
    return os.path.join(pathhash, id)
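
# Illustrative note (not part of the original module): getcachekey fans the
# shared cache out by the sha1 of the file path, so a blob for 'foo/bar.py'
# with node id '<40-hex-node>' in repo 'myrepo' lands at a path shaped like
#
#   myrepo/ab/cdef...0123/<40-hex-node>
#
# where 'abcdef...0123' is sha1('foo/bar.py') split after two hex digits.
# getlocalkey uses the same hashing, without the repo name prefix.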

def getcachepath(ui, allowempty=False):
    cachepath = ui.config("remotefilelog", "cachepath")
    if not cachepath:
        if allowempty:
            return None
        else:
            raise error.Abort(_("could not find config option "
                                "remotefilelog.cachepath"))
    return util.expandpath(cachepath)

def getcachepackpath(repo, category):
    cachepath = getcachepath(repo.ui)
    if category != constants.FILEPACK_CATEGORY:
        return os.path.join(cachepath, repo.name, 'packs', category)
    else:
        return os.path.join(cachepath, repo.name, 'packs')

def getlocalpackpath(base, category):
    return os.path.join(base, 'packs', category)

def createrevlogtext(text, copyfrom=None, copyrev=None):
    """returns a string that matches the revlog contents in a
    traditional revlog
    """
    meta = {}
    if copyfrom or text.startswith('\1\n'):
        if copyfrom:
            meta['copy'] = copyfrom
            meta['copyrev'] = copyrev
        text = storageutil.packmeta(meta, text)

    return text

def parsemeta(text):
    """parse mercurial filelog metadata"""
    meta, size = storageutil.parsemeta(text)
    if text.startswith('\1\n'):
        s = text.index('\1\n', 2)
        text = text[s + 2:]
    return meta or {}, text
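
# Illustrative sketch (not part of the original module): filelog metadata is
# framed by '\1\n' markers ahead of the real contents, so parsemeta strips
# the frame and returns the key/value pairs separately, roughly:
#
#   raw = '\1\ncopy: a.txt\ncopyrev: <40-hex-node>\n\1\nfile body'
#   parsemeta(raw)  ->  ({'copy': 'a.txt', 'copyrev': '<40-hex-node>'},
#                        'file body')
#
# createrevlogtext is the approximate inverse, re-packing the metadata.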

def sumdicts(*dicts):
    """Adds all the values of *dicts together into one dictionary. This assumes
    the values in *dicts are all summable.

    e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
    """
    result = collections.defaultdict(lambda: 0)
    for dict in dicts:
        for k, v in dict.iteritems():
            result[k] += v
    return result

def prefixkeys(dict, prefix):
    """Returns ``dict`` with ``prefix`` prepended to all its keys."""
    result = {}
    for k, v in dict.iteritems():
        result[prefix + k] = v
    return result

def reportpackmetrics(ui, prefix, *stores):
    dicts = [s.getmetrics() for s in stores]
    dict = prefixkeys(sumdicts(*dicts), prefix + '_')
    ui.log(prefix + "_packsizes", "", **pycompat.strkwargs(dict))

def _parsepackmeta(metabuf):
    """parse datapack meta, bytes (<metadata-list>) -> dict

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to convert some of them to other types like
    integers, on their own.

    raise ValueError if the data is corrupted
    """
    metadict = {}
    offset = 0
    buflen = len(metabuf)
    while buflen - offset >= 3:
        key = metabuf[offset]
        offset += 1
        metalen = struct.unpack_from('!H', metabuf, offset)[0]
        offset += 2
        if offset + metalen > buflen:
            raise ValueError('corrupted metadata: incomplete buffer')
        value = metabuf[offset:offset + metalen]
        metadict[key] = value
        offset += metalen
    if offset != buflen:
        raise ValueError('corrupted metadata: redundant data')
    return metadict

def _buildpackmeta(metadict):
    """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)

    The dict contains raw content - both keys and values are strings.
    Upper-level business may want to serialize values of other types (like
    integers) to strings before calling this function.

    raise ProgrammingError when metadata key is illegal, or ValueError if
    length limit is exceeded
    """
    metabuf = ''
    for k, v in sorted((metadict or {}).iteritems()):
        if len(k) != 1:
            raise error.ProgrammingError('packmeta: illegal key: %s' % k)
        if len(v) > 0xfffe:
            raise ValueError('metadata value is too long: 0x%x > 0xfffe'
                             % len(v))
        metabuf += k
        metabuf += struct.pack('!H', len(v))
        metabuf += v
    # len(metabuf) is guaranteed representable in 4 bytes, because there are
    # only 256 keys, and for each value, len(value) <= 0xfffe.
    return metabuf
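
# Illustrative round trip (not part of the original module): each metadata
# entry is serialized as a one-byte key, a two-byte big-endian length, then
# the value, so
#
#   buf = _buildpackmeta({'s': '\x01\x00'})
#   # buf == 's' + '\x00\x02' + '\x01\x00'   (key, length 2, value)
#   _parsepackmeta(buf)  ->  {'s': '\x01\x00'}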

_metaitemtypes = {
    constants.METAKEYFLAG: (int, pycompat.long),
    constants.METAKEYSIZE: (int, pycompat.long),
}

def buildpackmeta(metadict):
    """like _buildpackmeta, but typechecks metadict and normalizes it.

    This means, METAKEYFLAG and METAKEYSIZE should have integers as values,
    and METAKEYFLAG will be dropped if its value is 0.
    """
    newmeta = {}
    for k, v in (metadict or {}).iteritems():
        expectedtype = _metaitemtypes.get(k, (bytes,))
        if not isinstance(v, expectedtype):
            raise error.ProgrammingError('packmeta: wrong type of key %s' % k)
        # normalize int to binary buffer
        if int in expectedtype:
            # optimization: remove flag if it's 0 to save space
            if k == constants.METAKEYFLAG and v == 0:
                continue
            v = int2bin(v)
        newmeta[k] = v
    return _buildpackmeta(newmeta)

def parsepackmeta(metabuf):
    """like _parsepackmeta, but converts fields to desired types
    automatically.

    This means, METAKEYFLAG and METAKEYSIZE fields will be converted to
    integers.
    """
    metadict = _parsepackmeta(metabuf)
    for k, v in metadict.iteritems():
        if k in _metaitemtypes and int in _metaitemtypes[k]:
            metadict[k] = bin2int(v)
    return metadict

def int2bin(n):
    """convert a non-negative integer to raw binary buffer"""
    buf = bytearray()
    while n > 0:
        buf.insert(0, n & 0xff)
        n >>= 8
    return bytes(buf)

def bin2int(buf):
    """the reverse of int2bin, convert a binary buffer to an integer"""
    x = 0
    for b in bytearray(buf):
        x <<= 8
        x |= b
    return x
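
# Illustrative examples (not part of the original module): int2bin emits the
# shortest big-endian encoding, with zero mapping to the empty buffer:
#
#   int2bin(4660)        ->  '\x12\x34'
#   bin2int('\x12\x34')  ->  4660
#   int2bin(0)           ->  ''      (and bin2int('') -> 0)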

def parsesizeflags(raw):
    """given a remotefilelog blob, return (headersize, rawtextsize, flags)

    see remotefilelogserver.createfileblob for the format.
    raise RuntimeError if the content is ill-formed.
    """
    flags = revlog.REVIDX_DEFAULT_FLAGS
    size = None
    try:
        index = raw.index('\0')
        header = raw[:index]
        if header.startswith('v'):
            # v1 and above, header starts with 'v'
            if header.startswith('v1\n'):
                for s in header.split('\n'):
                    if s.startswith(constants.METAKEYSIZE):
                        size = int(s[len(constants.METAKEYSIZE):])
                    elif s.startswith(constants.METAKEYFLAG):
                        flags = int(s[len(constants.METAKEYFLAG):])
            else:
                raise RuntimeError('unsupported remotefilelog header: %s'
                                   % header)
        else:
            # v0, str(int(size)) is the header
            size = int(header)
    except ValueError:
        raise RuntimeError("unexpected remotefilelog header: illegal format")
    if size is None:
        raise RuntimeError("unexpected remotefilelog header: no size found")
    return index + 1, size, flags

def buildfileblobheader(size, flags, version=None):
    """return the header of a remotefilelog blob.

    see remotefilelogserver.createfileblob for the format.
    approximately the reverse of parsesizeflags.

    version could be 0 or 1, or None (auto decide).
    """
    # choose v0 if flags is empty, otherwise v1
    if version is None:
        version = int(bool(flags))
    if version == 1:
        header = ('v1\n%s%d\n%s%d'
                  % (constants.METAKEYSIZE, size,
                     constants.METAKEYFLAG, flags))
    elif version == 0:
        if flags:
            raise error.ProgrammingError('fileblob v0 does not support flag')
        header = '%d' % size
    else:
        raise error.ProgrammingError('unknown fileblob version %d' % version)
    return header
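
# Illustrative round trip (not part of the original module), assuming the
# single-letter meta keys from constants.py (METAKEYSIZE == 's',
# METAKEYFLAG == 'f'):
#
#   buildfileblobheader(11, 0)  ->  '11'            (v0: just the size)
#   buildfileblobheader(11, 2)  ->  'v1\ns11\nf2'
#   parsesizeflags('v1\ns11\nf2' + '\0' + rawtext)
#       ->  (10, 11, 2)   # header size incl. NUL, rawtext size, flags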

def ancestormap(raw):
    offset, size, flags = parsesizeflags(raw)
    start = offset + size

    mapping = {}
    while start < len(raw):
        divider = raw.index('\0', start + 80)

        currentnode = raw[start:(start + 20)]
        p1 = raw[(start + 20):(start + 40)]
        p2 = raw[(start + 40):(start + 60)]
        linknode = raw[(start + 60):(start + 80)]
        copyfrom = raw[(start + 80):divider]

        mapping[currentnode] = (p1, p2, linknode, copyfrom)
        start = divider + 1

    return mapping
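
# Illustrative note (not part of the original module): after the header and
# the raw text, each ancestor record in a fileblob is laid out as
#
#   20-byte node | 20-byte p1 | 20-byte p2 | 20-byte linknode | copyfrom '\0'
#
# four fixed 20-byte binary hashes followed by a NUL-terminated copy source,
# which is why the divider search above starts at start + 80.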

def readfile(path):
    f = open(path, 'rb')
    try:
        result = f.read()

        # we should never have empty files
        if not result:
            os.remove(path)
            raise IOError("empty file: %s" % path)

        return result
    finally:
        f.close()

def unlinkfile(filepath):
    if pycompat.iswindows:
        # On Windows, os.unlink cannot delete readonly files
        os.chmod(filepath, stat.S_IWUSR)
    os.unlink(filepath)

def renamefile(source, destination):
    if pycompat.iswindows:
        # On Windows, os.rename cannot rename readonly files
        # and cannot overwrite destination if it exists
        os.chmod(source, stat.S_IWUSR)
        if os.path.isfile(destination):
            os.chmod(destination, stat.S_IWUSR)
            os.unlink(destination)

    os.rename(source, destination)

def writefile(path, content, readonly=False):
    dirname, filename = os.path.split(path)
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
    os.close(fd)

    try:
        f = util.posixfile(temp, 'wb')
        f.write(content)
        f.close()

        if readonly:
            mode = 0o444
        else:
            # tempfiles are created with 0o600, so we need to manually set the
            # mode.
            oldumask = os.umask(0)
            # there's no way to get the umask without modifying it, so set it
            # back
            os.umask(oldumask)
            mode = ~oldumask

        renamefile(temp, path)
        os.chmod(path, mode)
    except Exception:
        try:
            unlinkfile(temp)
        except OSError:
            pass
        raise
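
# Illustrative usage note (not part of the original module): writefile is an
# atomic write - the content goes to a mkstemp file in the destination
# directory first and is then renamed into place, so a concurrent readfile
# should never observe a partially written cache entry:
#
#   writefile('/path/to/cache/blob', blobcontents, readonly=True)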

def sortnodes(nodes, parentfunc):
    """Topologically sorts the nodes, using the parentfunc to find
    the parents of nodes."""
    nodes = set(nodes)
    childmap = {}
    parentmap = {}
    roots = []

    # Build a child and parent map
    for n in nodes:
        parents = [p for p in parentfunc(n) if p in nodes]
        parentmap[n] = set(parents)
        for p in parents:
            childmap.setdefault(p, set()).add(n)
        if not parents:
            roots.append(n)

    roots.sort()
    # Process roots, adding children to the queue as they become roots
    results = []
    while roots:
        n = roots.pop(0)
        results.append(n)
        if n in childmap:
            children = childmap[n]
            for c in children:
                childparents = parentmap[c]
                childparents.remove(n)
                if len(childparents) == 0:
                    # insert at the beginning, that way child nodes
                    # are likely to be output immediately after their
                    # parents. This gives better compression results.
                    roots.insert(0, c)

    return results
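
# Illustrative example (not part of the original module): sortnodes is a
# Kahn-style topological sort where a node becomes a root once all of its
# in-set parents have been emitted; parents outside ``nodes`` are ignored.
# Children are pushed to the front so each tends to follow its parent:
#
#   parents = {'a': [], 'b': ['a'], 'c': ['b']}
#   sortnodes(['c', 'a', 'b'], parents.get)  ->  ['a', 'b', 'c']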

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def readunpack(stream, fmt):
    data = readexactly(stream, struct.calcsize(fmt))
    return struct.unpack(fmt, data)

def readpath(stream):
    rawlen = readexactly(stream, constants.FILENAMESIZE)
    pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
    return readexactly(stream, pathlen)

def readnodelist(stream):
    rawlen = readexactly(stream, constants.NODECOUNTSIZE)
    nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(nodecount):
        yield readexactly(stream, constants.NODESIZE)

def readpathlist(stream):
    rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
    pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
    for i in pycompat.xrange(pathcount):
        yield readpath(stream)
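
# Illustrative sketch (not part of the original module): the readers above
# consume a count-prefixed sequence of length-prefixed items. Building one
# in memory and reading it back might look like
#
#   import io
#   paths = ['foo/bar.py', 'baz.txt']
#   buf = struct.pack(constants.PATHCOUNTSTRUCT, len(paths))
#   for p in paths:
#       buf += struct.pack(constants.FILENAMESTRUCT, len(p)) + p
#   assert list(readpathlist(io.BytesIO(buf))) == paths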

def getgid(groupname):
    try:
        gid = grp.getgrnam(groupname).gr_gid
        return gid
    except KeyError:
        return None

def setstickygroupdir(path, gid, warn=None):
    if gid is None:
        return
    try:
        os.chown(path, -1, gid)
        os.chmod(path, 0o2775)
    except (IOError, OSError) as ex:
        if warn:
            warn(_('unable to chown/chmod on %s: %s\n') % (path, ex))

def mkstickygroupdir(ui, path):
    """Creates the given directory (if it doesn't exist) and gives it a
    particular group with setgid enabled."""
    gid = None
    groupname = ui.config("remotefilelog", "cachegroup")
    if groupname:
        gid = getgid(groupname)
        if gid is None:
            ui.warn(_('unable to resolve group name: %s\n') % groupname)

    # we use a single stat syscall to test the existence and mode / group bit
    st = None
    try:
        st = os.stat(path)
    except OSError:
        pass

    if st:
        # exists
        if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid:
            # permission needs to be fixed
            setstickygroupdir(path, gid, ui.warn)
        return

    oldumask = os.umask(0o002)
    try:
        missingdirs = [path]
        path = os.path.dirname(path)
        while path and not os.path.exists(path):
            missingdirs.append(path)
            path = os.path.dirname(path)

        for path in reversed(missingdirs):
            try:
                os.mkdir(path)
            except OSError as ex:
                if ex.errno != errno.EEXIST:
                    raise

        for path in missingdirs:
            setstickygroupdir(path, gid, ui.warn)
    finally:
        os.umask(oldumask)

def getusername(ui):
    try:
        return stringutil.shortuser(ui.username())
    except Exception:
        return 'unknown'

def getreponame(ui):
    reponame = ui.config('paths', 'default')
    if reponame:
        return os.path.basename(reponame)
    return "unknown"